
3D Math: Perlin Noise


After four days of effort, I finally implemented 3D Perlin noise myself. The first time I successfully rendered the teapot with it, I felt like Chris, the lead of The Pursuit of Happyness, and couldn't help applauding myself.

Four whole days. I barely memorized any vocabulary during them; by day I sat alone in a classroom facing cold code, and at night I stayed up in the dorm until 2 or 3 a.m., scouring the web for references and source code. But when the teapot finally rendered, it all suddenly felt worth it, because its mottled colors warmed my heart!

OK, on to the topic.

The underlying principle is actually not that hard, but there are so many scattered implementations online that when it came to writing my own I didn't know where to start, and all sorts of problems came up along the way. In the end it worked, which is why I'm so pleased.

First, here is some of the material I found online.

I also downloaded the libnoise source code; I'd say it was the key to successfully rendering the teapot. More on that later.

Here I will only cover 1D to 3D Perlin noise. I didn't implement the 4D version; I really don't have the time and can't afford to spend too long on this step, since the project only uses 2D Perlin noise to render the sea surface.

The theory is already well covered online, so I will mainly explain the points that need attention during implementation.

First, a brief word on the theory.

Perlin noise is built from several coherent noise functions; each coherent noise function is called an octave.

The more octaves there are, the more detailed the Perlin noise becomes, but the longer it takes to compute.
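
To make that summation concrete, here is a minimal sketch (my own illustration, not code from any of the references below; coherentNoise stands for whichever coherent noise function is used):

// Fractal sum of octaves: each octave doubles the frequency and scales the
// amplitude by a persistence factor. coherentNoise is a placeholder for a
// coherent noise function returning values in [-1, 1].
double sumOctaves1D(double x, int octaves, double persistence,
                    double (*coherentNoise)(double))
{
	double sum = 0.0, amplitude = 1.0, frequency = 1.0;
	for (int i = 0; i < octaves; ++i)
	{
		sum += amplitude * coherentNoise(x * frequency);
		frequency *= 2.0;         // next octave: double the frequency
		amplitude *= persistence; // ...and shrink the amplitude
	}
	return sum;
}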

For the detailed theory, see:

http://blog.sina.com.cn/s/blog_68f6e8a901013t7d.html

http://www.cnblogs.com/babyrender/archive/2008/10/27/BabyRender.html

The coherent noise function:

Input: a vector of the corresponding dimension (a single float in 1D, a float[2] in 2D, and so on).

Output: a float in [-1, 1].

Take 1D as an example. First we generate a random gradient (in 1D, just a slope) at every integer point. Given an input float f, find the integers x0 and x1 to its left and right, look up their gradients g0 and g1, and compute the offsets from the integer points to f: r0 = f - x0 and r1 = f - x1 = r0 - 1. Multiplying each gradient by the corresponding offset gives two values u and v. Finally, a blend weight h is computed from r0 as

h = s_curve_new(r0), and the final random value, lying in [-1, 1], is u + h * (v - u).

#define s_curve(t) ( t * t * (3. - 2. * t) ) // i.e. 3t^2 - 2t^3, the old version
#define s_curve_new(t) ( t * t * t * (t * (t * 6 - 15) + 10) )  // i.e. 6t^5 - 15t^4 + 10t^3, the new version
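
Putting the 1D recipe above into code, one coherent noise evaluation looks roughly like this (a sketch of my own; gradientAt is a hypothetical lookup returning the random slope assigned to an integer point):

#include <cmath>

// One 1D coherent noise evaluation, following the steps described above.
// gradientAt is a hypothetical callback that returns the random gradient
// (slope) stored at an integer point.
double coherent1D(double f, double (*gradientAt)(int))
{
	int    x0 = (int)floor(f), x1 = x0 + 1; // integers left and right of f
	double r0 = f - x0, r1 = r0 - 1.0;      // offsets of f from x0 and x1
	double u  = gradientAt(x0) * r0;        // left gradient times its offset
	double v  = gradientAt(x1) * r1;        // right gradient times its offset
	double h  = r0 * r0 * r0 * (r0 * (r0 * 6 - 15) + 10); // s_curve_new(r0)
	return u + h * (v - u);                 // blended result in [-1, 1]
}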

OK, let's go through the implementations version by version.

First, the two official versions:

http://mrl.nyu.edu/~perlin/noise

The first version implements only the 3D coherent noise.

Below is my own code.

/*------------------------------------------------------------
main.cpp -- http://mrl.nyu.edu/~perlin/noise/
(c) Seamanj.2013/9/6
------------------------------------------------------------*/
#include <iostream>
#include <cmath>
using namespace std;

int* p;

int permutation[] = { 151,160,137,91,90,15,
131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23,
190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33,
88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166,
77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,
102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196,
135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123,
5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42,
223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9,
129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228,
251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107,
49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254,
138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180
};


static double fade(double t) { return t * t * t * (t * (t * 6 - 15) + 10); }
static double lerp(double t, double a, double b) { return a + t * (b - a); }
static double grad(int hash, double x, double y, double z) // note: the gradient term produces values in the [-1,1] range
{
	int h = hash & 15;                      // CONVERT LO 4 BITS OF HASH CODE INTO 12 GRADIENT DIRECTIONS.
	double u = h<8 ? x : y,v = h<4 ? y : h==12||h==14 ? x : z;
	return ((h&1) == 0 ? u : -u) + ((h&2) == 0 ? v : -v);// 4 possible cases for the term left of '+', 8 for the term on the right
}



double Noise3D(double x, double y, double z)// output range is [-1,1]
{
	int iX = (int)floor(x) & 255,
		iY = (int)floor(y) & 255,
		iZ = (int)floor(z) & 255;
	x -= floor(x);
	y -= floor(y);
	z -= floor(z);
	double u = fade(x),
		  v = fade(y),
		  w = fade(z);
	//index∈[0,511]   p[index]∈[0,255]  iX,iY,iZ ∈ [0,255]
	int A = p[iX] + iY, AA = p[A] + iZ, AB = p[A+1] + iZ,
		B = p[iX + 1] + iY, BA = p[B] + iZ, BB = p[B + 1] + iZ;

	return lerp(w, lerp(v, 
						lerp(u, grad(p[AA], x, y, z), grad(p[BA], x - 1, y, z)),
						lerp(u, grad(p[AB], x, y - 1, z), grad(p[BB], x - 1, y - 1, z))),
				    lerp(v,
						lerp(u, grad(p[AA + 1], x, y, z - 1), grad(p[BA + 1], x - 1, y, z - 1)),
						lerp(u, grad(p[AB + 1], x, y - 1, z - 1), grad(p[BB + 1], x - 1, y - 1, z - 1))));
}
int main()
{
	p = new int[512];
	for (int i=0; i < 256 ; i++)
		p[256+i] = p[i] = permutation[i]; // the table is duplicated because the +1 offsets used during interpolation can push the index past 255
	for(double d1 = -10, d2 = -10, d3 = -10; d1 <= 10 && d2 <= 10 && d3 <= 10; d1 += 0.01, d2 += 0.01, d3 += 0.01)
		cout << Noise3D(d1, d2, d3) << endl;
	delete [] p;
	return 0;
}

Output:

On the surface this output looks acceptable, but problems show up once you actually use it for rendering, so the input data needs some processing. I'll come back to that later.
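
As a quick illustration of the kind of input handling I mean (not the final version), sample Noise3D over a lattice at a chosen frequency instead of stepping x, y and z together along one diagonal:

// Illustration only: print a 64x64 slice of Noise3D sampled at a fixed frequency.
void printNoiseSlice()
{
	const int    size      = 64;
	const double frequency = 0.1; // smaller frequency -> smoother, larger features
	for (int j = 0; j < size; ++j)
		for (int i = 0; i < size; ++i)
		{
			double n = Noise3D(i * frequency, j * frequency, 0.5); // n is in [-1, 1]
			cout << 0.5 * (n + 1.0) << endl;                       // remapped to [0, 1]
		}
}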

The second version is more complete and implements 1D through 3D noise.

http://mrl.nyu.edu/~perlin/doc/oscar.html

My code is below.

/*------------------------------------------------------------
main.cpp -- http://mrl.nyu.edu/~perlin/doc/oscar.html
(c) Seamanj.2013/9/6
------------------------------------------------------------*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>


#define B 0x100        //256
#define BM 0xff			//255

#define N 0x1000		//4096
#define NP 12			// N = 2^NP
#define NM 0xfff

static int p[ B + B + 2];
static float g3[B + B + 2][3];
static float g2[B + B + 2][2];
static float g1[B + B +2];
static int start = 1;// whether initialization is still needed


static void init(void);

#define s_curve(t) ( t * t * (3. - 2. * t) )
#define s_curve_new(t) ( t * t * t * (t * (t * 6 - 15) + 10) )
#define lerp(t, a, b) ( a + t * (b - a) )

// b0 is the integer below vec[i] and b1 the integer above it; r0 is the offset of the sample point from b0 (positive), r1 its offset from b1 (negative)
#define setup(i, b0, b1, r0, r1) \
	t = vec[i] + N;\
	b0 = ((int)t) & BM;\
	b1 = (b0+1) & BM;\
	r0 = t - (int)t;\
	r1 = r0 - 1;

double noise1(double arg)// output range [-1,1]
{
	int bx0, bx1;
	float rx0, rx1, sx, t, u, v ,vec[1];

	vec[0] = arg;
	if(start)
	{
		start = 0;
		init();
	}
	setup(0, bx0, bx1, rx0, rx1 );
	
	sx = s_curve(rx0);// compute the blend weight from rx0; this uses the old Hermite curve
					  // the newer formula is: weight = t * t * t * ( t * ( t * 6 - 15 ) + 10 );
	//sx = s_curve_new(rx0);


	u = rx0 * g1[ p[ bx0 ] ];// hash bx0 through p[] so the gradient picked at bx0 is more random
	v = rx1 * g1[ p[ bx1 ] ];

	return lerp(sx, u, v);// finally interpolate
}

float noise2(float vec[2])// vec[0] is x, vec[1] is y
{
	int bx0, bx1, by0, by1, b00, b10, b01, b11;
	float rx0, rx1, ry0, ry1, *q, sx, sy, a, b, u, v, t;
	register int i,j;
	if( start )
	{
		start = 0;
		init();
	}
	setup(0, bx0, bx1, rx0, rx1);
	setup(1, by0, by1, ry0, ry1);
	i = p[bx0];
	j = p[bx1];

	b00 = p[i+by0];// four hashed corner values
	b10 = p[j+by0];
	b01 = p[i+by1];
	b11 = p[j+by1];

	sx = s_curve(rx0);// compute the blend weights
	sy = s_curve(ry0);

#define at2(rx,ry) ( rx * q[0] + ry * q[1] )

// 								   by1
// 								   ↑
// 					               ry1
// 								   ↓
// 			 	         bx0←rx0→⊕←rx1→bx1						
// 			+y					   ↑
// 			↑		               ry0
// 			  →+x				   ↓
// 			 			           by0
	q = g2[b00]; u = at2(rx0, ry0);// dot the gradient at (bx0,by0) with the offset (rx0,ry0) of the sample point from that corner
	q = g2[b10]; v = at2(rx1, ry0);// dot the gradient at (bx1,by0) with the offset (rx1,ry0)
	a = lerp(sx, u, v);

	q = g2[b01]; u = at2(rx0, ry1);// dot the gradient at (bx0,by1) with the offset (rx0,ry1)
	q = g2[b11]; v = at2(rx1, ry1);// dot the gradient at (bx1,by1) with the offset (rx1,ry1)
	b = lerp(sx, u, v);

	return lerp(sy, a ,b);// bilinear blend
	
}
float noise3(float vec[3])
{
	int bx0, bx1, by0, by1, bz0, bz1, b00, b10, b01, b11;
	float rx0, rx1, ry0, ry1, rz0, rz1, *q, sx, sy, sz, a, b, c, d, u, v, t;
	register int i, j;
	if (start)
	{
		start = 0;
		init();
	}
	setup(0, bx0, bx1, rx0, rx1);
	setup(1, by0, by1, ry0, ry1);
	setup(2, bz0, bz1, rz0, rz1);

	i = p[bx0];
	j = p[bx1];

	b00 = p[i + by0];
	b10 = p[j + by0];
	b01 = p[i + by1];
	b11 = p[j + by1];//产生四个随机数,和bz0,bz1组合成8个随机数

	sx = s_curve(rx0);
	sy = s_curve(ry0);
	sz = s_curve(rz0);

#define at3(rx, ry, rz) ( rx * q[0] + ry * q[1] + rz * q[2] )
// 								   by1
// 								   ↑
// 								   丨      bz1
// 					               ry1   ↗
// 								   丨  rz1
// 								   ↓↙
// 			 	         bx0←rx0→⊕←rx1→bx1						
// 								 ↗↑
// 							   rz0 丨
// 			+y  +z	         ↙    ry0
// 			↑↗ 			bz0	   丨
// 			  →+x			       ↓
// 			 			           by0

	q = g3[b00 + bz0]; u = at3(rx0, ry0, rz0);// gradient at (bx0,by0,bz0) dotted with the offset (rx0,ry0,rz0)
	q = g3[b10 + bz0]; v = at3(rx1, ry0, rz0);// gradient at (bx1,by0,bz0) dotted with the offset (rx1,ry0,rz0)
	a = lerp(sx, u, v);

	q = g3[b01 + bz0]; u = at3(rx0, ry1, rz0);// gradient at (bx0,by1,bz0) dotted with the offset (rx0,ry1,rz0)
	q = g3[b11 + bz0]; v = at3(rx1, ry1, rz0);// gradient at (bx1,by1,bz0) dotted with the offset (rx1,ry1,rz0)
	b = lerp(sx, u, v);

	c = lerp(sy, a, b);

	q = g3[b00 + bz1]; u = at3(rx0, ry0, rz1);// gradient at (bx0,by0,bz1) dotted with the offset (rx0,ry0,rz1)
	q = g3[b10 + bz1]; v = at3(rx1, ry0, rz1);// gradient at (bx1,by0,bz1) dotted with the offset (rx1,ry0,rz1)
	a = lerp(sx, u, v);

	q = g3[b01 + bz1]; u = at3(rx0, ry1, rz1);// gradient at (bx0,by1,bz1) dotted with the offset (rx0,ry1,rz1)
	q = g3[b11 + bz1]; v = at3(rx1, ry1, rz1);// gradient at (bx1,by1,bz1) dotted with the offset (rx1,ry1,rz1)
	b = lerp(sx, u, v);

	d = lerp(sy, a, b);

	return lerp(sz, c, d);// trilinear blend
	

}
static void normalize2(float v[2])
{
	float s;
	s = sqrt(v[0] * v[0] + v[1] * v[1]);
	v[0] = v[0] / s;
	v[1] = v[1] / s;
}
static void normalize3(float v[3])
{
	float s;
	s = sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
	v[0] = v[0] / s;
	v[1] = v[1] / s;
	v[2] = v[2] / s;
}
static void init(void)
{
	int i, j, k;
	for( i = 0; i < B; i++)  // [0 , 255]
	{
		p[i] = i;
		g1[i] = (float)((rand() % (B + B)) - B) / B;// [0 , 511] - 256 ->  [-256 , 255] / 256 -> [-1 , 255/256 = 0.99609375]
		for( j = 0; j < 2 ; j++)
			g2[i][j] = (float)((rand() % (B + B)) - B) / B;//[-1 , 0.99609375]
		normalize2(g2[i]);
		for( j = 0; j < 3; j++)
			g3[i][j] = (float)((rand() % (B + B )) - B) / B;//[-1 , 0.99609375]
		normalize3(g3[i]);
	}// generate a random gradient for every integer lattice point

	while( --i ) //[255,1]
	{
		k = p[i];
		p[i] = p[ j = rand() % B];// j∈[0,255]
		p[j] = k;
	}// shuffle the first 256 entries of p; the values are in [0,255]
	// note: p has B + B + 2 = 514 entries, indices [0,513]
	for( i = 0; i < B + 2; i++)//[0,257]
	{
		p[B + i] = p[i];// 0->256,1->257,...,255->511,256->512,257->513
		// so p[0..255] is a random permutation of [0,255], p[256..511] is a copy of p[0..255], and p[512], p[513] copy p[0], p[1]
		g1[B + i] = g1[i];
		for( j = 0; j < 2; j++)
			g2[B + i][j] = g2[i][j];
		for( j = 0; j < 3; j++)
			g3[B + i][j] = g3[i][j];
		// likewise, duplicate the 1D, 2D and 3D gradient tables in the same way
	}
}


int main()
{
// 	for(double d = -100.0; d < 100.0; d += 0.001)
// 		printf("%lf\n", noise1(d) );
// 	for(double d1 = -100.0,d2 = -100.0 ; d1 < 100 && d2 < 100 ; d1 += 0.001, d2 += 0.001)
// 	{
// 		float vec[2] = {d1,d2};
// 		printf("%lf\n", noise2(vec));
// 	}
	for(double d1 = -100.0,d2 = -100.0,d3 = -100.0 ; d1 < 100 && d2 < 100 && d3 < 100 ; d1 += 0.001, d2 += 0.001, d3 += 0.001)
	{
		float vec[3] = {(float)d1, (float)d2, (float)d3};
		printf("%lf\n", noise3(vec));
	}
}

Output:

Looking at the output, it isn't great: values with an absolute value above 0.5 are almost nonexistent. Even the official version has its pitfalls! Still, my teapot demo is basically written with this version's 3D noise; it is the official version, after all.

Looking at libnoise's output, however, values like 0.6 or 0.7 are common, so I will probably use the libnoise library from now on.

/*------------------------------------------------------------
main.cpp -- use libnoise to generate noise
(c) Seamanj.2013/9/6
------------------------------------------------------------*/
#include <iostream>
#include <noise.h>
#include "noiseutils.h"
using namespace noise;
#define lecture1 1
#define lecture2 0
int main (int argc, char** argv)
{
	module::Perlin myModule;
#if lecture1
	for(double d1 = -100.0,d2 = -100.0,d3 = -100.0 ; d1 < 100 && d2 < 100 && d3 < 100 ; d1 += 0.001, d2 += 0.001, d3 += 0.001)
	{
		 
		std::cout << myModule.GetValue (d1,d2,d3) << std::endl;
	}

#endif
#if lecture2
	utils::NoiseMap heightMap;
	utils::NoiseMapBuilderPlane heightMapBuilder;
	heightMapBuilder.SetSourceModule (myModule);
	heightMapBuilder.SetDestNoiseMap (heightMap);
	heightMapBuilder.SetDestSize (256, 256);
	heightMapBuilder.SetBounds (2.0, 6.0, 1.0, 5.0);
	heightMapBuilder.Build ();

	utils::RendererImage renderer;
	utils::Image image;
	renderer.SetSourceNoiseMap (heightMap);
	renderer.SetDestImage (image);
	renderer.Render ();
	utils::WriterBMP writer;
	writer.SetSourceImage (image);
	writer.SetDestFilename ("tutorial.bmp");
	writer.WriteDestFile ();
#endif
	return 0;
}

In addition, following the hints in the book <<三维游戏引擎设计技术及其应用>> (3D Game Engine Design Techniques and Applications), I generated a 2D Perlin noise and used it to render the ocean from the earlier Displacement mapping post. The only difference is that the displacement map here is generated from 2D Perlin noise instead of being predefined. The version is a bit crude, but the result is acceptable.

Its octave sampling method is a bit unusual. The lowest octave has the longest wavelength and the largest amplitude. Say we want a 129x129 2D map (the method works best with sizes of 2^n + 1) and set the wavelength of the lowest octave to 32. We then sample at 0, 32, 64, 96 and 128 (that is, generate random numbers between 0 and the amplitude at those points) and interpolate between those samples for everything in between. For the second octave the wavelength halves to 16, so we sample at 0, 16, 32, 48, 64, 80, 96, 112, 128 and interpolate again; note that the amplitude also halves. The remaining octaves follow the same pattern, and summing all of them gives the 2D Perlin noise.
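
As a quick check on the numbers (my own arithmetic, but it matches the "256+128+64+32 = 480" comment in Perlin2D.cpp and the "/ 480" normalization in the shader below): with 4 octaves, a starting amplitude of 256 and a falloff of 0.5, the largest height the accumulated noise can reach is

// Maximum accumulated height for the demo parameters
// (4 octaves, starting amplitude 256, falloff 0.5).
float maxHeight = 0.0f, amplitude = 256.0f;
for (int k = 0; k < 4; ++k)
{
	maxHeight += amplitude;
	amplitude *= 0.5f;
}
// maxHeight == 256 + 128 + 64 + 32 == 480, hence the "/ 480" in PerlinNoise.fx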

The ocean rendered with this version:

The ocean generated with this version together with the corresponding displacement map; the whiter the color, the higher the sea surface:

The 2D Perlin noise generated with this version (i.e. the displacement map; it is simply two Perlin noise maps accumulated with certain weights):

The source code of this demo is given below.

/*------------------------------------------------------------
	3D_Math_2DPerlinNoise_ocean.cpp -- achieve perlin noise
			(c) Seamanj.2013/9/4
------------------------------------------------------------*/
#include "DXUT.h"
#include "resource.h"

// phase1 : add camera
// phase2 : add sky box
// phase3 : add grid mesh
// phase4 : add FX
// phase5 : add perlin noise
// phase6 : add flat technique
#define phase1 1
#define phase2 1
#define phase3 1
#define phase4 1
#define phase5 1
#define phase6 1
#if phase1
#include "DXUTcamera.h"
CFirstPersonCamera g_Camera;
#endif
#if phase2
// Vertex Buffer
LPDIRECT3DVERTEXBUFFER9 g_pVB = NULL;
// Index Buffer
LPDIRECT3DINDEXBUFFER9 g_pIB = NULL;
PDIRECT3DCUBETEXTURE9	g_pCubeTex = 0;
#endif
#if phase3
#include <vector>
int g_iVerticesNumPerRow = 128 + 1;
int g_iVerticesNumPerCol = 128 + 1;
float g_fDeltaX = 0.25f;
float g_fDeltaZ = 0.25f;

const float EPSILON  = 0.001f;
IDirect3DVertexDeclaration9* g_pVertexDecl;
ID3DXMesh* g_pMesh;
// The two normal maps to scroll.
IDirect3DTexture9* g_pNormalTex1;
IDirect3DTexture9* g_pNormalTex2;

// The two displacement maps to scroll.
IDirect3DTexture9* g_pDisplacementTex1;
IDirect3DTexture9* g_pDisplacementTex2;

//===============================================================
// Colors and Materials

const D3DXCOLOR WHITE(1.0f, 1.0f, 1.0f, 1.0f);
const D3DXCOLOR BLACK(0.0f, 0.0f, 0.0f, 1.0f);
const D3DXCOLOR RED(1.0f, 0.0f, 0.0f, 1.0f);
const D3DXCOLOR GREEN(0.0f, 1.0f, 0.0f, 1.0f);
const D3DXCOLOR BLUE(0.0f, 0.0f, 1.0f, 1.0f);


struct DirectionalLight
{
	D3DXCOLOR ambient;
	D3DXCOLOR diffuse;
	D3DXCOLOR specular;
	D3DXVECTOR3 directionInWorld;
};


struct Material
{
	Material()
		:ambient(WHITE), diffuse(WHITE), specular(WHITE), specularPower(8.0f){}
	Material(const D3DXCOLOR& a, const D3DXCOLOR& d, 
		 const D3DXCOLOR& s, float power)
		:ambient(a), diffuse(d), specular(s), specularPower(power){}

	D3DXCOLOR ambient;
	D3DXCOLOR diffuse;
	D3DXCOLOR specular;
	float specularPower;
};


DirectionalLight g_structDirectionalLight;


Material g_structMaterial;


D3DXVECTOR2 g_scaleHeights = D3DXVECTOR2(0.5f, 0.5f);
float g_fTexScale = 8.0f;

D3DXVECTOR2 g_normalMapVelocity1 = D3DXVECTOR2(0.05f, 0.07f);
D3DXVECTOR2 g_normalMapVelocity2 = D3DXVECTOR2(-0.01f, 0.13f);
D3DXVECTOR2 g_displacementMapVelocity1 = D3DXVECTOR2(0.012f, 0.015f);
D3DXVECTOR2 g_displacementMapVelocity2 = D3DXVECTOR2(0.014f, 0.05f);

// Offset of normal maps for scrolling (vary as a function of time)
D3DXVECTOR2 g_normalMapOffset1(0.0f, 0.0f);
D3DXVECTOR2 g_normalMapOffset2(0.0f, 0.0f);

// Offset of displacement maps for scrolling (vary as a function of time)
D3DXVECTOR2 g_displacementMapOffset1(0.0f, 0.0f);
D3DXVECTOR2 g_displacementMapOffset2(0.0f, 0.0f);


#endif

#if phase4
#include "SDKmisc.h"//加载文件时会用到
ID3DXEffect*		g_pEffect = NULL;       // D3DX effect interface
D3DXHANDLE			g_hTech = 0;
#if phase6
D3DXHANDLE			g_hFlatTech = 0;
#endif
D3DXHANDLE			g_hWorld = 0;
D3DXHANDLE			g_hWorldInv = 0;
D3DXHANDLE			g_hWorldViewProj = 0;
D3DXHANDLE			g_hEyePositionInWorld = 0;
D3DXHANDLE			g_hDirectionalLightStruct = 0;
D3DXHANDLE			g_hMaterialStruct = 0;
D3DXHANDLE			g_hNormalTex1 = 0;
D3DXHANDLE			g_hNormalTex2 = 0;
D3DXHANDLE			g_hDisplacementTex1 = 0;
D3DXHANDLE			g_hDisplacementTex2 = 0;
D3DXHANDLE			g_hNormalOffset1 = 0;
D3DXHANDLE			g_hNormalOffset2 = 0;
D3DXHANDLE			g_hDisplacementOffset1 = 0;
D3DXHANDLE			g_hDisplacementOffset2 = 0;
D3DXHANDLE			g_hScaleHeights = 0; // scales the heights sampled from displacement maps 1 and 2
D3DXHANDLE			g_hDelta = 0;	// grid cell size along the X and Z axes

#endif
#if phase5
#include "Perlin2D.h"
Perlin2D myPerlinNoise(4, 32, 256, 0.5);
#endif
//--------------------------------------------------------------------------------------
// Rejects any D3D9 devices that aren't acceptable to the app by returning false
//--------------------------------------------------------------------------------------
bool CALLBACK IsD3D9DeviceAcceptable( D3DCAPS9* pCaps, D3DFORMAT AdapterFormat, D3DFORMAT BackBufferFormat,
                                      bool bWindowed, void* pUserContext )
{
    // Typically want to skip back buffer formats that don't support alpha blending
    IDirect3D9* pD3D = DXUTGetD3D9Object();
    if( FAILED( pD3D->CheckDeviceFormat( pCaps->AdapterOrdinal, pCaps->DeviceType,
                                         AdapterFormat, D3DUSAGE_QUERY_POSTPIXELSHADER_BLENDING,
                                         D3DRTYPE_TEXTURE, BackBufferFormat ) ) )
        return false;

    return true;
}


//--------------------------------------------------------------------------------------
// Before a device is created, modify the device settings as needed
//--------------------------------------------------------------------------------------
bool CALLBACK ModifyDeviceSettings( DXUTDeviceSettings* pDeviceSettings, void* pUserContext )
{
#if phase1
	pDeviceSettings->d3d9.pp.PresentationInterval = D3DPRESENT_INTERVAL_IMMEDIATE;
#endif
    return true;
}
#if phase3
//create Vertex Declaration
HRESULT createVertexDeclaration( IDirect3DDevice9* pd3dDevice )
{

	D3DVERTEXELEMENT9 decl[] = 
	{
		// offsets in bytes
		{0, 0,  D3DDECLTYPE_FLOAT3, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_POSITION, 0},
		{0, 12, D3DDECLTYPE_FLOAT2, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_TEXCOORD, 0},
		{0, 20, D3DDECLTYPE_FLOAT2, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_TEXCOORD, 1},
		D3DDECL_END()
	};

	return pd3dDevice->CreateVertexDeclaration(decl, &g_pVertexDecl);

}
struct GridVertexFormat
{
	D3DXVECTOR3 pos;
	D3DXVECTOR2 scaledTexCoord;     // [a, b]
	D3DXVECTOR2 normalizedTexCoord; // [0, 1]
};
// Write the vertex positions and indices into the given vectors; they are copied into the mesh buffers afterwards.
void writeVertexPosAndIndicesToVectors(int iVerticesNumPerRow, int iVerticesNumPerCol,float fDeltaX, float fDeltaZ, 
				const D3DXVECTOR3& center,std::vector<D3DXVECTOR3>& vecVertexPosData,
				std::vector<DWORD>& vecIndexData)
{
	int iVerticesNum = iVerticesNumPerRow * iVerticesNumPerCol;
	int iCellsNumPerRow = iVerticesNumPerRow - 1;
	int iCellsNumPerCol = iVerticesNumPerCol - 1;

	int iTrianglesNum = iCellsNumPerRow * iCellsNumPerCol  * 2;

	float fWidth = (float)iCellsNumPerCol * fDeltaX;
	float fDepth = (float)iCellsNumPerRow * fDeltaZ;

	//===========================================
	// Build vertices.

	// We first build the grid geometry centered about the origin and on
	// the xz-plane, row-by-row and in a top-down fashion.  We then translate
	// the grid vertices so that they are centered about the specified 
	// parameter 'center'. Vertices are laid out starting from the top-left corner, filling each row left to right.

	vecVertexPosData.resize( iVerticesNum );

	// Offsets to translate grid from quadrant 4 to center of 
	// coordinate system.
	float fOffsetX = -fWidth * 0.5f; 
	float fOffsetZ =  fDepth * 0.5f;

	int k = 0;
	for(float i = 0; i < iVerticesNumPerRow; ++i)
	{
		for(float j = 0; j < iVerticesNumPerCol; ++j)
		{
			// Negate the depth coordinate to put in quadrant four.  
			// Then offset to center about coordinate system.
			vecVertexPosData[k].x =  j * fDeltaX + fOffsetX;
			vecVertexPosData[k].z = -i * fDeltaZ + fOffsetZ;
			vecVertexPosData[k].y =  0.0f;

			// Translate so that the center of the grid is at the
			// specified 'center' parameter.
			D3DXMATRIX T;
			D3DXMatrixTranslation(&T, center.x, center.y, center.z);
			D3DXVec3TransformCoord(&vecVertexPosData[k], &vecVertexPosData[k], &T);
			
			++k; // Next vertex
		}
	}

	//===========================================
	// Build indices.

	vecIndexData.resize(iTrianglesNum * 3);
	 
	// Generate indices for each quad.
	k = 0;
	for(DWORD i = 0; i < (DWORD)iCellsNumPerRow; ++i)
	{
		DWORD j;
		for(j = 0; j < (DWORD)iCellsNumPerCol; ++j)
		{
			vecIndexData[k]     =   i   * iVerticesNumPerCol + j;
			vecIndexData[k + 1] =   i   * iVerticesNumPerCol + j + 1;
			vecIndexData[k + 2] = (i+1) * iVerticesNumPerCol + j;
					
			vecIndexData[k + 3] = (i+1) * iVerticesNumPerCol + j;
			vecIndexData[k + 4] =   i   * iVerticesNumPerCol + j + 1;
			vecIndexData[k + 5] = (i+1) * iVerticesNumPerCol + j + 1;
			
// 			1---2   5
// 			|  /  / |
// 			| /  /  | 
// 			3   4---6
			// next quad
			k += 6;
		}
// 			vecIndexData[k]     =   i   * iVerticesNumPerCol + j;
// 			vecIndexData[k + 1] =   i   * iVerticesNumPerCol ;
// 			vecIndexData[k + 2] = (i+1) * iVerticesNumPerCol + j;
// 					
// 			vecIndexData[k + 3] = (i+1) * iVerticesNumPerCol + j;
// 			vecIndexData[k + 4] =   i   * iVerticesNumPerCol ;
// 			vecIndexData[k + 5] = (i+1) * iVerticesNumPerCol ;
// 			k += 6;
	}
}

#endif
//--------------------------------------------------------------------------------------
// Create any D3D9 resources that will live through a device reset (D3DPOOL_MANAGED)
// and aren't tied to the back buffer size
//--------------------------------------------------------------------------------------
HRESULT CALLBACK OnD3D9CreateDevice( IDirect3DDevice9* pd3dDevice, const D3DSURFACE_DESC* pBackBufferSurfaceDesc,
                                     void* pUserContext )
{
#if phase3
	HRESULT hr;
#endif
#if phase1
// Setup the camera's view parameters    
	D3DXVECTOR3 vecEye( 0.0f, 0.0f, -5.0f );  
	D3DXVECTOR3 vecAt ( 0.0f, 0.0f, -0.0f );  
	g_Camera.SetViewParams( &vecEye, &vecAt );  
	g_Camera.SetEnablePositionMovement( true );
#endif
#if phase2
	D3DXCreateCubeTextureFromFile(pd3dDevice, L"grassenvmap1024.dds", &g_pCubeTex);
#endif
#if phase3
	createVertexDeclaration(pd3dDevice);
	DWORD dwTrianglesNum  = (g_iVerticesNumPerRow - 1) * (g_iVerticesNumPerCol - 1) * 2;
	DWORD dwVerticesNum = g_iVerticesNumPerRow * g_iVerticesNumPerCol;
	D3DVERTEXELEMENT9 elems[MAX_FVF_DECL_SIZE];
	UINT uElemsNum = 0;
	g_pVertexDecl->GetDeclaration(elems, &uElemsNum);
	V(	D3DXCreateMesh(dwTrianglesNum, dwVerticesNum, D3DXMESH_MANAGED, elems, pd3dDevice, &g_pMesh) );
	//===============================================================
	// Write the grid vertices and triangles to the mesh.
	GridVertexFormat *pVertices = NULL;
	V( g_pMesh->LockVertexBuffer(0, (void**)&pVertices) );
	std::vector<D3DXVECTOR3> vecVertexPosData;
	std::vector<DWORD> vecIndexData;
	writeVertexPosAndIndicesToVectors(g_iVerticesNumPerRow, g_iVerticesNumPerCol, g_fDeltaX, 
		g_fDeltaZ, D3DXVECTOR3(0.0f, 0.0f, 0.0f), vecVertexPosData, vecIndexData);
	for(int i = 0; i < g_iVerticesNumPerRow; ++i)
	{
		for(int j = 0; j < g_iVerticesNumPerCol; ++j)
		{
			DWORD index   = i * g_iVerticesNumPerCol + j;
			pVertices[index].pos  = vecVertexPosData[index];
			pVertices[index].scaledTexCoord = D3DXVECTOR2((float)j / (g_iVerticesNumPerCol-1), 
				(float)i / (g_iVerticesNumPerRow-1))* g_fTexScale;
			// g_fTexScale controls how many times the texture tiles across the grid; larger values mean more repetitions.
			pVertices[index].normalizedTexCoord = D3DXVECTOR2((float)j / (g_iVerticesNumPerCol-1), 
				(float)i / (g_iVerticesNumPerRow-1));
		}
	}
	V( g_pMesh->UnlockVertexBuffer() );
	//===============================================================
	// Write triangle data(Indices & Attributes) so we can compute normals.
	WORD* pIndices = NULL;
	V( g_pMesh->LockIndexBuffer(0, (void**)&pIndices) );
	DWORD* pAttributes = NULL;
	V( g_pMesh->LockAttributeBuffer(0, &pAttributes) );
	for(UINT i = 0; i < g_pMesh->GetNumFaces(); ++i)
	{
		pIndices[i*3+0] = (WORD)vecIndexData[i*3+0];
		pIndices[i*3+1] = (WORD)vecIndexData[i*3+1];
		pIndices[i*3+2] = (WORD)vecIndexData[i*3+2];

		pAttributes[i] = 0; // All in subset 0.
	}
	
	V( g_pMesh->UnlockIndexBuffer() );
	V( g_pMesh->UnlockAttributeBuffer() );
	//===============================================================
	// Optimize for the vertex cache and build attribute table.
	// D3DXMESHOPT_ATTRSORT optimization builds an attribute table.
	DWORD* adj = new DWORD[g_pMesh->GetNumFaces() * 3];
	// Generate adjacency info
	V(g_pMesh->GenerateAdjacency(EPSILON, adj));
	V(g_pMesh->OptimizeInplace(D3DXMESHOPT_VERTEXCACHE|D3DXMESHOPT_ATTRSORT,adj, 0, 0, 0));
	delete[] adj;
	//===============================================================
	// Create textures.
	V(D3DXCreateTextureFromFile(pd3dDevice, L"normal_map1.dds", &g_pNormalTex1));
	V(D3DXCreateTextureFromFile(pd3dDevice, L"normal_map2.dds", &g_pNormalTex2));
#if !phase5
	V(D3DXCreateTextureFromFileEx(pd3dDevice, L"Mytex1.jpg", g_iVerticesNumPerRow, 
		g_iVerticesNumPerCol,1, 0, D3DFMT_R32F, D3DPOOL_MANAGED, D3DX_DEFAULT, D3DX_DEFAULT, 
		0, 0, 0, &g_pDisplacementTex1));
	V(D3DXCreateTextureFromFileEx(pd3dDevice, L"Mytex2.jpg", g_iVerticesNumPerRow, 
		g_iVerticesNumPerCol,1, 0, D3DFMT_R32F, D3DPOOL_MANAGED, D3DX_DEFAULT, D3DX_DEFAULT, 
		0, 0, 0, &g_pDisplacementTex2));
#else
	// Generate the random height values with Perlin noise instead of loading them from files.
	V( D3DXCreateTexture(pd3dDevice, g_iVerticesNumPerCol, g_iVerticesNumPerRow, 0,
		0, D3DFMT_R32F,  D3DPOOL_MANAGED, &g_pDisplacementTex1) );
	V( D3DXCreateTexture(pd3dDevice, g_iVerticesNumPerCol, g_iVerticesNumPerRow, 0,
		 0, D3DFMT_R32F, D3DPOOL_MANAGED, &g_pDisplacementTex2) );
	D3DLOCKED_RECT lockedRect1,lockedRect2;
	g_pDisplacementTex1->LockRect(0, &lockedRect1, 0, 0);
	float* imageData1 = static_cast<float*>(lockedRect1.pBits);
	g_pDisplacementTex2->LockRect(0, &lockedRect2, 0, 0);
	float* imageData2 = static_cast<float*>(lockedRect2.pBits);
	myPerlinNoise.CopyPerlin2DToBuffer(g_iVerticesNumPerCol, g_iVerticesNumPerRow, imageData1);
	myPerlinNoise.CopyPerlin2DToBuffer(g_iVerticesNumPerCol, g_iVerticesNumPerRow, imageData2);
	g_pDisplacementTex1->UnlockRect(0);
	g_pDisplacementTex2->UnlockRect(0);

	// fill mipmaps
	// Important: the mipmap chain must be filled. Creating the texture allocates the mip levels
	// but does not fill them, so at shallow viewing angles the surface turned black because the
	// higher mip levels were still black. This cost me another 3 hours!
	V( D3DXFilterTexture(
	g_pDisplacementTex1,// texture to fill mipmap levels
	0, // default palette
	0, // use top level as source for lower levels
	D3DX_DEFAULT)); // default filter
	V( D3DXFilterTexture(g_pDisplacementTex2,0,	0, D3DX_DEFAULT));

// 	D3DSURFACE_DESC dd1,dd2;
// 	g_pDisplacementTex1->GetLevelDesc(0,&dd1);
// 	g_pDisplacementTex1->GetLevelDesc(0,&dd2);
	//D3DXSaveTextureToFile(L"Mytex2.jpg", D3DXIFF_JPG,g_pDisplacementTex2,NULL);

#endif	
	
#endif
#if phase4
	WCHAR str[MAX_PATH];
	// Read the D3DX effect file
	V_RETURN( DXUTFindDXSDKMediaFileCch( str, MAX_PATH, L"PerlinNoise.fx" ) );
	// Create the effect 
	LPD3DXBUFFER pErrorBuff = NULL;
	V_RETURN( D3DXCreateEffectFromFile(
		pd3dDevice,		// associated device
		str,			// effect filename
		NULL,			// no preprocessor definitions
		NULL,			// no ID3DXInclude interface
		D3DXSHADER_DEBUG,	// compile flags
		NULL,			// don't share parameters
		&g_pEffect,		// return effect
		&pErrorBuff			// return error messages
		) );
	if( pErrorBuff )
		MessageBoxA(0, (char*)pErrorBuff->GetBufferPointer(), 0, 0);
	// get handle
	g_hTech = g_pEffect->GetTechniqueByName("myTech");
#if phase6
	g_hFlatTech = g_pEffect->GetTechniqueByName("flatTech");
#endif

	g_hWorld = g_pEffect->GetParameterByName(0, "g_mWorld");
	g_hWorldInv = g_pEffect->GetParameterByName(0, "g_mWorldInv");
	g_hWorldViewProj = g_pEffect->GetParameterByName(0, "g_mWorldViewProj");
	g_hEyePositionInWorld = g_pEffect->GetParameterByName(0, "g_eyePositionInWorld");
	g_hDirectionalLightStruct = g_pEffect->GetParameterByName(0, "g_structDirectionalLight");
	g_hMaterialStruct = g_pEffect->GetParameterByName(0, "g_structMaterial");

	g_hNormalTex1 = g_pEffect->GetParameterByName(0, "g_texNormalMap1");
	g_hNormalTex2 = g_pEffect->GetParameterByName(0, "g_texNormalMap2");
	g_hDisplacementTex1 = g_pEffect->GetParameterByName(0, "g_texDisplacementMap1");
	g_hDisplacementTex2 = g_pEffect->GetParameterByName(0, "g_texDisplacementMap2");
	g_hNormalOffset1 = g_pEffect->GetParameterByName(0, "g_normalOffset1");
	g_hNormalOffset2 = g_pEffect->GetParameterByName(0, "g_normalOffset2");
	g_hDisplacementOffset1 = g_pEffect->GetParameterByName(0, "g_DisplacementOffset1");
	g_hDisplacementOffset2 = g_pEffect->GetParameterByName(0, "g_DisplacementOffset2");
	g_hScaleHeights = g_pEffect->GetParameterByName(0, "g_scaleHeights");
	g_hDelta = g_pEffect->GetParameterByName(0, "g_delta");

	// We don't need to set these every frame since they do not change.
	D3DXMATRIXA16 mWorld, mWorldInv;
	D3DXMatrixIdentity(&mWorld);
	V(g_pEffect->SetMatrix(g_hWorld, &mWorld));
	D3DXMatrixInverse(&mWorldInv, 0, &mWorld);
	V(g_pEffect->SetMatrix(g_hWorldInv, &mWorldInv));
	V(g_pEffect->SetTexture(g_hNormalTex1, g_pNormalTex1));
	V(g_pEffect->SetTexture(g_hNormalTex2, g_pNormalTex2));
	V(g_pEffect->SetTexture(g_hDisplacementTex1, g_pDisplacementTex1));
	V(g_pEffect->SetTexture(g_hDisplacementTex2, g_pDisplacementTex2));

	g_structDirectionalLight.directionInWorld = D3DXVECTOR3(0.0f, -1.0f, -3.0f);
	D3DXVec3Normalize(&g_structDirectionalLight.directionInWorld, &g_structDirectionalLight.directionInWorld);
	g_structDirectionalLight.ambient = D3DXCOLOR(0.3f, 0.3f, 0.3f, 1.0f);
	g_structDirectionalLight.diffuse = D3DXCOLOR(1.0f, 1.0f, 1.0f, 1.0f);
	g_structDirectionalLight.specular = D3DXCOLOR(0.7f, 0.7f, 0.7f, 1.0f);

	V(g_pEffect->SetValue(g_hDirectionalLightStruct, &g_structDirectionalLight, sizeof(DirectionalLight)));

	g_structMaterial.ambient   = D3DXCOLOR(0.4f, 0.4f, 0.7f, 0.0f);
	g_structMaterial.diffuse   = D3DXCOLOR(0.4f, 0.4f, 0.7f, 1.0f);
	g_structMaterial.specular      = 0.8f*WHITE;
	g_structMaterial.specularPower = 128.0f;

	V(g_pEffect->SetValue(g_hMaterialStruct, &g_structMaterial, sizeof(Material)));
	V(g_pEffect->SetValue(g_hScaleHeights, &g_scaleHeights, sizeof(D3DXVECTOR2)));
	D3DXVECTOR2 delta(g_fDeltaX, g_fDeltaZ);
	V(g_pEffect->SetValue(g_hDelta, &delta, sizeof(D3DXVECTOR2)));

#endif
    return S_OK;
}

#if phase2
struct MyVertexFormat
{
	FLOAT x, y, z;
	FLOAT u, v, w;
};
#define FVF_VERTEX (D3DFVF_XYZ | D3DFVF_TEX1 | D3DFVF_TEXCOORDSIZE3(0) )
static HRESULT initVertexIndexBuffer(IDirect3DDevice9* pd3dDevice)
{
	static const MyVertexFormat Vertices[] = 
	{
		//+X
		{  1, -1, -1,  1, -1, -1 },
		{  1, -1, 1, 1, -1, 1 },
		{  1, 1, 1 , 1, 1, 1  },
		{ 1, 1, -1,	1, 1, -1 },
		//-X
		{ -1, -1, -1,-1, -1, -1 },	
		{ -1, 1, -1, -1, 1, -1 }, 
		{ -1, 1, 1 , -1, 1, 1 }, 
		{ -1, -1, 1, -1, -1, 1}, 
		//+Y
		{ -1, 1, -1 ,-1, 1, -1 }, 
		{ 1, 1, -1 ,1, 1, -1 },
		{ 1, 1, 1 ,1, 1, 1},
		{ -1, 1, 1 ,-1, 1, 1 },
		//-Y
		{ -1, -1, -1,-1, -1, -1 }, 
		{ -1, -1, 1,-1, -1, 1 },
		{ 1, -1, 1,1, -1, 1 },
		{ 1, -1, -1, 1, -1, -1},
		//Z
		{ -1, -1, 1,-1, -1, 1 }, 
		{ -1, 1, 1,-1, 1, 1 },
		{ 1, 1, 1,1, 1, 1 },
		{ 1, -1, 1,1, -1, 1 },
		//-Z
		 { -1, -1, -1,-1, -1, -1 }, 
		{ 1, -1, -1,1, -1, -1 },
		{ 1, 1, -1,1, 1, -1 },
		{ -1, 1, -1,-1, 1, -1 }
	};
	if (FAILED(pd3dDevice->CreateVertexBuffer(sizeof(Vertices),
		0, FVF_VERTEX,
		D3DPOOL_DEFAULT,
		&g_pVB, NULL))) {
			return E_FAIL;
	}
	void* pVertices;
	if (FAILED(g_pVB->Lock(0, 0, /* map entire buffer */
		&pVertices, 0))) {
			return E_FAIL;
	}
	memcpy(pVertices, Vertices, sizeof(Vertices));
	g_pVB->Unlock();

	// Create and initialize index buffer
	static const WORD Indices[] =
	{
		0, 1, 2,
		0, 2, 3,

		4, 5, 6,
		4, 6, 7,

		8, 9, 10,
		8,10, 11,

		12,13,14,
		12,14,15,

		16,17,18,
		16,18,19,

		20,21,22,
		20,22,23
	};
	if (FAILED(pd3dDevice->CreateIndexBuffer(sizeof(Indices),
		D3DUSAGE_WRITEONLY, 
		D3DFMT_INDEX16,
		D3DPOOL_DEFAULT,
		&g_pIB, NULL))) {
			return E_FAIL;
	}
	void* pIndices;
	if (FAILED(g_pIB->Lock(0, 0, /* map entire buffer */
		&pIndices, 0))) {
			return E_FAIL;
	}
	memcpy(pIndices, Indices, sizeof(Indices));
	g_pIB->Unlock();
	return S_OK;
}



#endif
//--------------------------------------------------------------------------------------
// Create any D3D9 resources that won't live through a device reset (D3DPOOL_DEFAULT) 
// or that are tied to the back buffer size 
//--------------------------------------------------------------------------------------
HRESULT CALLBACK OnD3D9ResetDevice( IDirect3DDevice9* pd3dDevice, const D3DSURFACE_DESC* pBackBufferSurfaceDesc,
                                    void* pUserContext )
{
#if phase1
	pd3dDevice->SetRenderState( D3DRS_CULLMODE, D3DCULL_NONE );  
	// Disable lighting (it is enabled by default).
	pd3dDevice->SetRenderState( D3DRS_LIGHTING, FALSE );  
//Setup the camera's projection parameters   
	float fAspectRatio = pBackBufferSurfaceDesc->Width / ( FLOAT )pBackBufferSurfaceDesc->Height;  
	  
	g_Camera.SetProjParams( D3DX_PI / 2, fAspectRatio, 0.1f, 5000.0f );  

#endif
#if phase4
	#if phase3
	HRESULT hr;
	if( g_pEffect )
        V_RETURN( g_pEffect->OnResetDevice() );
#endif
#endif
#if !phase2
    return S_OK;
#else
	return initVertexIndexBuffer(pd3dDevice);
#endif
}


//--------------------------------------------------------------------------------------
// Handle updates to the scene.  This is called regardless of which D3D API is used
//--------------------------------------------------------------------------------------
void CALLBACK OnFrameMove( double fTime, float fElapsedTime, void* pUserContext )
{
#if phase1
	g_Camera.FrameMove( fElapsedTime );
#endif
#if phase4
	g_normalMapOffset1 += g_normalMapVelocity1 * fElapsedTime;
	g_normalMapOffset2 += g_normalMapVelocity2 * fElapsedTime;
	g_displacementMapOffset1 += g_displacementMapVelocity1 * fElapsedTime;
	g_displacementMapOffset2 += g_displacementMapVelocity2 * fElapsedTime;

// 	if(g_normalMapOffset1.x >= 1.0f || g_normalMapOffset1.x <= -1.0f)
// 		g_normalMapOffset1.x = 0.0f;
// 	if(g_normalMapOffset2.x >= 1.0f || g_normalMapOffset2.x <= -1.0f)
// 		g_normalMapOffset2.x = 0.0f;
// 	if(g_normalMapOffset1.y >= 1.0f || g_normalMapOffset1.y <= -1.0f)
// 		g_normalMapOffset1.y = 0.0f;
// 	if(g_normalMapOffset2.y >= 1.0f || g_normalMapOffset2.y <= -1.0f)
// 		g_normalMapOffset2.y = 0.0f;
// 
// 	if(g_displacementMapOffset1.x >= 1.0f || g_displacementMapOffset1.x <= -1.0f)
// 		g_displacementMapOffset1.x = 0.0f;
// 	if(g_displacementMapOffset2.x >= 1.0f || g_displacementMapOffset2.x <= -1.0f)
// 		g_displacementMapOffset2.x = 0.0f;
// 	if(g_displacementMapOffset1.y >= 1.0f || g_displacementMapOffset1.y <= -1.0f)
// 		g_displacementMapOffset1.y = 0.0f;
// 	if(g_displacementMapOffset2.y >= 1.0f || g_displacementMapOffset2.y <= -1.0f)
// 		g_displacementMapOffset2.y = 0.0f;
// Note: the wrap threshold here should really be 2 rather than 1 because the textures use MIRROR addressing; I'm leaving it as is out of laziness.
#endif
}


//--------------------------------------------------------------------------------------
// Render the scene using the D3D9 device
//--------------------------------------------------------------------------------------
void CALLBACK OnD3D9FrameRender( IDirect3DDevice9* pd3dDevice, double fTime, float fElapsedTime, void* pUserContext )
{
    HRESULT hr;

    // Clear the render target and the zbuffer 
    V( pd3dDevice->Clear( 0, NULL, D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER, D3DCOLOR_ARGB( 0, 45, 50, 170 ), 1.0f, 0 ) );

    // Render the scene
    if( SUCCEEDED( pd3dDevice->BeginScene() ) )
    {
#if phase2

		pd3dDevice->SetRenderState(D3DRS_LIGHTING, false);
		// Set world matrix  
		D3DXMATRIX M;
		D3DXMatrixIdentity( &M ); // M = identity matrix
		D3DXMatrixScaling(&M,2000, 2000, 2000);
		pd3dDevice->SetTransform(D3DTS_WORLD, &M) ; 
		// Set view matrix   
		D3DXMATRIX view  = *g_Camera.GetViewMatrix() ;  
		pd3dDevice->SetTransform(D3DTS_VIEW, &view) ;  
		// Set projection matrix   
		D3DXMATRIX proj  = *g_Camera.GetProjMatrix() ;
		pd3dDevice->SetTransform(D3DTS_PROJECTION, &proj) ;  
		pd3dDevice->SetStreamSource(0, g_pVB, 0, sizeof(MyVertexFormat));
		pd3dDevice->SetIndices(g_pIB);//sets the current index buffer. 
		pd3dDevice->SetFVF(FVF_VERTEX);//Sets the current vertex stream declaration.
		pd3dDevice->SetTexture(0, g_pCubeTex);
		pd3dDevice->DrawIndexedPrimitive(D3DPT_TRIANGLELIST, 0, 0, 24, 0, 12);	

#endif
#if phase6
		g_pEffect->SetTechnique(g_hFlatTech);
		UINT iPass, cPasses;
			D3DXMATRIXA16 mWorldViewProjection;
		mWorldViewProjection =  *g_Camera.GetViewMatrix() * *g_Camera.GetProjMatrix();
		V( g_pEffect->SetMatrix( g_hWorldViewProj, &mWorldViewProjection) );
		V(g_pEffect->SetValue(g_hDisplacementOffset1, &g_displacementMapOffset1, sizeof(D3DXVECTOR2)));
		V(g_pEffect->SetValue(g_hDisplacementOffset2, &g_displacementMapOffset2, sizeof(D3DXVECTOR2)));
		V( g_pEffect->Begin( &cPasses, 0 ) );
		for( iPass = 0; iPass < cPasses ; iPass++ )
		{
			V( g_pEffect->BeginPass( iPass ) );
			g_pMesh->DrawSubset(0);
			V( g_pEffect->EndPass() );
		}
		V( g_pEffect->End() );
#endif
#if phase4
		V(g_pEffect->SetTechnique(g_hTech));
		//set WorldViewProject matrix
		mWorldViewProjection =  *g_Camera.GetViewMatrix() * *g_Camera.GetProjMatrix();
		V( g_pEffect->SetMatrix( g_hWorldViewProj, &mWorldViewProjection) );
		V( g_pEffect->SetFloatArray( g_hEyePositionInWorld, (const float*)(g_Camera.GetEyePt()), 3) );
		V(g_pEffect->SetValue(g_hNormalOffset1, &g_normalMapOffset1, sizeof(D3DXVECTOR2)));
		V(g_pEffect->SetValue(g_hNormalOffset2, &g_normalMapOffset2, sizeof(D3DXVECTOR2)));
		V(g_pEffect->SetValue(g_hDisplacementOffset1, &g_displacementMapOffset1, sizeof(D3DXVECTOR2)));
		V(g_pEffect->SetValue(g_hDisplacementOffset2, &g_displacementMapOffset2, sizeof(D3DXVECTOR2)));
		//UINT iPass, cPasses;
		
		V( g_pEffect->Begin( &cPasses, 0 ) );
		for( iPass = 0; iPass < cPasses ; iPass++ )
		{
			V( g_pEffect->BeginPass( iPass ) );
			g_pMesh->DrawSubset(0);
			V( g_pEffect->EndPass() );
		}
		V( g_pEffect->End() );
#endif

        V( pd3dDevice->EndScene() );
    }
}


//--------------------------------------------------------------------------------------
// Handle messages to the application 
//--------------------------------------------------------------------------------------
LRESULT CALLBACK MsgProc( HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam,
                          bool* pbNoFurtherProcessing, void* pUserContext )
{
#if phase1
	g_Camera.HandleMessages( hWnd, uMsg, wParam, lParam );
#endif
    return 0;
}


//--------------------------------------------------------------------------------------
// Release D3D9 resources created in the OnD3D9ResetDevice callback 
//--------------------------------------------------------------------------------------
void CALLBACK OnD3D9LostDevice( void* pUserContext )
{
#if phase2
	SAFE_RELEASE(g_pVB);
	SAFE_RELEASE(g_pIB);
#endif
#if phase3
	SAFE_RELEASE(g_pVertexDecl);
#endif
#if phase4
	if( g_pEffect )
        g_pEffect->OnLostDevice();
#endif
}


//--------------------------------------------------------------------------------------
// Release D3D9 resources created in the OnD3D9CreateDevice callback 
//--------------------------------------------------------------------------------------
void CALLBACK OnD3D9DestroyDevice( void* pUserContext )
{
#if phase2
	SAFE_RELEASE(g_pCubeTex);
#endif
#if phase3
	SAFE_RELEASE(g_pMesh);
	SAFE_RELEASE(g_pNormalTex1);
	SAFE_RELEASE(g_pNormalTex2);
	SAFE_RELEASE(g_pDisplacementTex1);
	SAFE_RELEASE(g_pDisplacementTex2);
#endif
#if phase4
	SAFE_RELEASE(g_pEffect);
#endif
}


//--------------------------------------------------------------------------------------
// Initialize everything and go into a render loop
//--------------------------------------------------------------------------------------
INT WINAPI wWinMain( HINSTANCE, HINSTANCE, LPWSTR, int )
{
    // Enable run-time memory check for debug builds.
#if defined(DEBUG) | defined(_DEBUG)
    _CrtSetDbgFlag( _CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF );
#endif

    // Set the callback functions
    DXUTSetCallbackD3D9DeviceAcceptable( IsD3D9DeviceAcceptable );
    DXUTSetCallbackD3D9DeviceCreated( OnD3D9CreateDevice );
    DXUTSetCallbackD3D9DeviceReset( OnD3D9ResetDevice );
    DXUTSetCallbackD3D9FrameRender( OnD3D9FrameRender );
    DXUTSetCallbackD3D9DeviceLost( OnD3D9LostDevice );
    DXUTSetCallbackD3D9DeviceDestroyed( OnD3D9DestroyDevice );
    DXUTSetCallbackDeviceChanging( ModifyDeviceSettings );
    DXUTSetCallbackMsgProc( MsgProc );
    DXUTSetCallbackFrameMove( OnFrameMove );

    // TODO: Perform any application-level initialization here

    // Initialize DXUT and create the desired Win32 window and Direct3D device for the application
    DXUTInit( true, true ); // Parse the command line and show msgboxes
    DXUTSetHotkeyHandling( true, true, true );  // handle the default hotkeys
    DXUTSetCursorSettings( true, true ); // Show the cursor and clip it when in full screen
    DXUTCreateWindow( L"3D_Math_2DPerlinNoise_ocean" );
    DXUTCreateDevice( true, 1024, 768 );

    // Start the render loop
    DXUTMainLoop();

    // TODO: Perform any application-level cleanup here

    return DXUTGetExitCode();
}

/*------------------------------------------------------------
	PerlinNoise.fx -- achieve Perlin noise
			(c) Seamanj.2013/9/4
------------------------------------------------------------*/

struct Material
{
	float4 ambient;
	float4 diffuse;
	float4 specular;
	float  specularPower;
};

struct DirectionalLight
{
	float4 ambient;
	float4 diffuse;
	float4 specular;
	float3 directionInWorld;  
};

uniform extern float4x4 g_mWorld;
uniform extern float4x4 g_mWorldInv;
uniform extern float4x4 g_mWorldViewProj;
uniform extern float3   g_eyePositionInWorld;
uniform extern DirectionalLight g_structDirectionalLight;
uniform extern Material g_structMaterial;

// Two normal maps and displacement maps.
uniform extern texture  g_texNormalMap1;
uniform extern texture  g_texNormalMap2;
uniform extern texture  g_texDisplacementMap1;
uniform extern texture  g_texDisplacementMap2;

// Texture coordinate offset vectors for scrolling
// normal maps and displacement maps.
uniform extern float2   g_normalOffset1;
uniform extern float2   g_normalOffset2;
uniform extern float2   g_DisplacementOffset1;
uniform extern float2   g_DisplacementOffset2;



// User-defined scaling factors to scale the heights
// sampled from the displacement map into a more convenient
// range.
uniform extern float2 g_scaleHeights;

// Space between grids in x,z directions in local space
// used for finite differencing.
uniform extern float2 g_delta;

// Shouldn't be hardcoded, but ok for demo.
static const float DISPLACEMENTMAP_SIZE = 129.0f;
static const float DISPLACEMENTMAP_DELTA   = 1.0f / DISPLACEMENTMAP_SIZE;

sampler g_samNormalMap1 = sampler_state
{
	Texture = <g_texNormalMap1>;
	MinFilter = ANISOTROPIC;
	MaxAnisotropy = 12;
	MagFilter = LINEAR;
	MipFilter = LINEAR;
	AddressU  = WRAP;
    AddressV  = WRAP;
};

sampler g_samNormalMap2 = sampler_state
{
	Texture = <g_texNormalMap2>;
	MinFilter = ANISOTROPIC;
	MaxAnisotropy = 12;
	MagFilter = LINEAR;
	MipFilter = LINEAR;
	AddressU  = WRAP;
    AddressV  = WRAP;
};

sampler g_samDisplacementMap1 = sampler_state
{
	Texture = <g_texDisplacementMap1>;
	MinFilter = POINT;
	MagFilter = POINT;
	MipFilter = POINT;
	AddressU  = MIRROR;
    AddressV  = MIRROR;
};

sampler g_samDisplacementMap2 = sampler_state
{
	Texture = <g_texDisplacementMap2>;
	MinFilter = POINT;
	MagFilter = POINT;
	MipFilter = POINT;
	AddressU  = MIRROR;
    AddressV  = MIRROR;
};


struct OutputVS
{
    float4 posInHomogeneous : POSITION0;
    float3 toEyeInTangent    : TEXCOORD0;
    float3 lightDirectionInTangent : TEXCOORD1;
    float2 texcoord1 : TEXCOORD2;
    float2 texcoord2 : TEXCOORD3;
};
float DoDisplacementMapping(float2 texCoord1, float2 texCoord2)
{
	// Transform to texel space
    float2 texelPos = DISPLACEMENTMAP_SIZE * texCoord1;
        
    // Determine the lerp amounts.           
    float2 lerps = frac(texelPos);
    
	float height1[4];
	// The displacement texture renders as cyan (B and G are both 1), so the height must be stored in the R channel.
	height1[0] = tex2Dlod(g_samDisplacementMap1, float4(texCoord1, 0.0f, 0.0f)).r;
	height1[1] = tex2Dlod(g_samDisplacementMap1, float4(texCoord1, 0.0f, 0.0f)+float4(DISPLACEMENTMAP_DELTA, 0.0f, 0.0f, 0.0f)).r;
	height1[2] = tex2Dlod(g_samDisplacementMap1, float4(texCoord1, 0.0f, 0.0f)+float4(0.0f, DISPLACEMENTMAP_DELTA, 0.0f, 0.0f)).r;
	height1[3] = tex2Dlod(g_samDisplacementMap1, float4(texCoord1, 0.0f, 0.0f)+float4(DISPLACEMENTMAP_DELTA, DISPLACEMENTMAP_DELTA, 0.0f, 0.0f)).r;
	// The sampled values are in [0,1].
	// Filter displacement map:
	float h1 = lerp( lerp( height1[0], height1[1], lerps.x ),
                     lerp( height1[2], height1[3], lerps.x ),
                     lerps.y );
	
	texelPos = DISPLACEMENTMAP_SIZE * texCoord2;
	lerps    = frac(texelPos);
	
	float height2[4];
	height2[0] = tex2Dlod(g_samDisplacementMap2, float4(texCoord2, 0.0f, 0.0f)).r;
	height2[1] = tex2Dlod(g_samDisplacementMap2, float4(texCoord2, 0.0f, 0.0f)+float4(DISPLACEMENTMAP_DELTA, 0.0f, 0.0f, 0.0f)).r;
	height2[2] = tex2Dlod(g_samDisplacementMap2, float4(texCoord2, 0.0f, 0.0f)+float4(0.0f, DISPLACEMENTMAP_DELTA, 0.0f, 0.0f)).r;
	height2[3] = tex2Dlod(g_samDisplacementMap2, float4(texCoord2, 0.0f, 0.0f)+float4(DISPLACEMENTMAP_DELTA, DISPLACEMENTMAP_DELTA, 0.0f, 0.0f)).r;
	
	// Filter displacement map:
	float h2 = lerp( lerp( height2[0], height2[1], lerps.x ),
                     lerp( height2[2], height2[3], lerps.x ),
                     lerps.y );
                   
	// Sum and scale the sampled heights.  
    return g_scaleHeights.x * h1 + g_scaleHeights.y * h2;
}
OutputVS myVertexEntry(float3 positionInLocal : POSITION0, 
                 float2 scaledTexCoord     : TEXCOORD0,// used by the normal maps
                 float2 normalizedTexCoord : TEXCOORD1)// used by the displacement maps
{
    // Zero out our output.
	OutputVS outVS = (OutputVS)0;
	
	// Scroll vertex texture coordinates to animate waves.
	float2 DisplacementTexCoord1 = normalizedTexCoord + g_DisplacementOffset1;
	float2 DisplacementTexCoord2 = normalizedTexCoord + g_DisplacementOffset2;
//  	float2 DisplacementTexCoord1 = normalizedTexCoord ;
//  	float2 DisplacementTexCoord2 = normalizedTexCoord ;
	// Set y-coordinate of water grid vertices based on displacement mapping.
	positionInLocal.y = DoDisplacementMapping(DisplacementTexCoord1, DisplacementTexCoord2) / 480 * 5 ;
	
	// Estimate TBN-basis using finite differencing in local space.  
	float left = DoDisplacementMapping(DisplacementTexCoord1 + float2(DISPLACEMENTMAP_DELTA, 0.0f), 
	                        DisplacementTexCoord2 + float2(DISPLACEMENTMAP_DELTA, 0.0f)) / 480 * 5;
	float front = DoDisplacementMapping(DisplacementTexCoord1 + float2(0.0f, DISPLACEMENTMAP_DELTA), 
	                        DisplacementTexCoord2 + float2(0.0f, DISPLACEMENTMAP_DELTA))/ 480 * 5;  
	                        
	float3x3 TBN;                       
	TBN[0] = normalize(float3(1.0f, (left - positionInLocal.y)/g_delta.x , 0.0f)); //Tangent
	TBN[1] = normalize(float3(0.0f, (front - positionInLocal.y)/g_delta.y , -1.0f));//Binormal
	TBN[2] = normalize(cross(TBN[0], TBN[1]));//Normal
	
	// Matrix transforms from object space to tangent space.
	float3x3 toTangentSpace = transpose(TBN);
	
	// Transform eye position to local space.
	float3 eyePositionInLocal = mul(float4(g_eyePositionInWorld, 1.0f), g_mWorldInv).xyz;
	
	// Transform to-eye vector to tangent space.
	float3 toEyeInLocal = eyePositionInLocal - positionInLocal;
	outVS.toEyeInTangent = mul(toEyeInLocal, toTangentSpace);
	
	// Transform light direction to tangent space.
	float3 lightDirectionInLocal = mul(float4(g_structDirectionalLight.directionInWorld, 0.0f), g_mWorldInv).xyz;
	outVS.lightDirectionInTangent  = mul(lightDirectionInLocal, toTangentSpace);
	
	// Transform to homogeneous clip space.
	outVS.posInHomogeneous = mul(float4(positionInLocal, 1.0f), g_mWorldViewProj);
	
	// Scroll texture coordinates.
	outVS.texcoord1 = scaledTexCoord + g_normalOffset1;
	outVS.texcoord2 = scaledTexCoord + g_normalOffset2;
	
	// Done--return the output.
    return outVS;
}

float4 myPixelEntry(float3 toEyeInTangent    : TEXCOORD0,
					float3 lightDirectionInTangent : TEXCOORD1,
				    float2 texcoord1      : TEXCOORD2,
				    float2 texcoord2      : TEXCOORD3) : COLOR
{
	// Interpolated normals can become unnormal--so normalize.
	// Note that toEyeW and normalW do not need to be normalized
	// because they are just used for a reflection and environment
	// map look-up and only direction matters.
	toEyeInTangent    = normalize(toEyeInTangent);
	lightDirectionInTangent = normalize(lightDirectionInTangent);
	
	// Light vector is opposite the direction of the light.
	float3 toLightInTangent = -lightDirectionInTangent;
	
	// Sample normal map.
	float3 normalInTangent1 = tex2D(g_samNormalMap1, texcoord1);
	float3 normalInTangent2 = tex2D(g_samNormalMap2, texcoord2);
	
	// Expand from [0, 1] compressed interval to true [-1, 1] interval.
    normalInTangent1 = 2.0f * normalInTangent1 - 1.0f;
    normalInTangent2 = 2.0f * normalInTangent2 - 1.0f;
    
	// Average the two vectors.
	float3 normalInTangent = normalize( 0.5f * ( normalInTangent1 + normalInTangent2));
	//normalInTangent = float3(0,0,1);
	// Compute the reflection vector.
	float3 r = reflect(lightDirectionInTangent, normalInTangent);
	
	// Determine how much (if any) specular light makes it into the eye.
	float s  = pow(max(dot(r, toEyeInTangent), 0.0f), g_structMaterial.specularPower);

	// Determine the diffuse light intensity that strikes the vertex.
	float d = max(dot(toLightInTangent, normalInTangent), 0.0f);
	
	// If the diffuse light intensity is low, kill the specular lighting term.
	// It doesn't look right to add specular light when the surface receives 
	// little diffuse light.
	if(d <= 0.0f)
	     s = 0.0f;
	
	// Compute the ambient, diffuse and specular terms separatly. 
	float3 spec = s * ( g_structMaterial.specular * g_structDirectionalLight.specular).rgb;
	float3 diffuse = d * (g_structMaterial.diffuse*g_structDirectionalLight.diffuse.rgb);
	float3 ambient = g_structMaterial.ambient * g_structDirectionalLight.ambient;
	
	float3 final = ambient + diffuse + spec;
	
	// Output the color and the alpha.
    return float4(final, g_structMaterial.diffuse.a);
}

technique myTech
{
    pass P0
    {
        // Specify the vertex and pixel shader associated with this pass.
        vertexShader = compile vs_3_0 myVertexEntry();
        pixelShader  = compile ps_3_0 myPixelEntry();
    }    
}



struct FlatOutputVS
{
    float4 posInHomogeneous : POSITION0;
    float2 texcoord1 : TEXCOORD0;
    float2 texcoord2 : TEXCOORD1;
};
FlatOutputVS flatVertexEntry(float3 positionInLocal : POSITION0, 
                 float2 scaledTexCoord     : TEXCOORD0,// used by the normal maps
                 float2 normalizedTexCoord : TEXCOORD1)// used by the displacement maps
{
    // Zero out our output.
	FlatOutputVS outVS = (FlatOutputVS)0;
	
	// Scroll vertex texture coordinates to animate waves.
	outVS.texcoord1 = normalizedTexCoord + g_DisplacementOffset1;
	outVS.texcoord2 = normalizedTexCoord + g_DisplacementOffset2;
// 	outVS.texcoord1 = normalizedTexCoord ;
// 	outVS.texcoord2 = normalizedTexCoord ;
	outVS.posInHomogeneous = mul(float4(positionInLocal, 1.0f), g_mWorldViewProj);

	// Done--return the output.
    return outVS;
}
float4 flatPixelEntry(float2 texCoord1      : TEXCOORD0,
					  float2 texCoord2      : TEXCOORD1) : COLOR
{

    
// Transform to texel space
    float2 texelPos = DISPLACEMENTMAP_SIZE * texCoord1;
        
    // Determine the lerp amounts.           
    float2 lerps = frac(texelPos);
    
	float height1[4];
	// The displacement texture renders as cyan (B and G are both 1), so the height must be stored in the R channel.
	height1[0] = tex2D(g_samDisplacementMap1, float4(texCoord1, 0.0f, 0.0f)).r;
	height1[1] = tex2D(g_samDisplacementMap1, float4(texCoord1, 0.0f, 0.0f)+float4(DISPLACEMENTMAP_DELTA, 0.0f, 0.0f, 0.0f)).r;
	height1[2] = tex2D(g_samDisplacementMap1, float4(texCoord1, 0.0f, 0.0f)+float4(0.0f, DISPLACEMENTMAP_DELTA, 0.0f, 0.0f)).r;
	height1[3] = tex2D(g_samDisplacementMap1, float4(texCoord1, 0.0f, 0.0f)+float4(DISPLACEMENTMAP_DELTA, DISPLACEMENTMAP_DELTA, 0.0f, 0.0f)).r;
	// The sampled values are in [0,1].
	// Filter displacement map:
	float h1 = lerp( lerp( height1[0], height1[1], lerps.x ),
                     lerp( height1[2], height1[3], lerps.x ),
                     lerps.y );
	
	texelPos = DISPLACEMENTMAP_SIZE * texCoord2;
	lerps    = frac(texelPos);
	
	float height2[4];
	height2[0] = tex2D(g_samDisplacementMap2, float4(texCoord2, 0.0f, 0.0f)).r;
	height2[1] = tex2D(g_samDisplacementMap2, float4(texCoord2, 0.0f, 0.0f)+float4(DISPLACEMENTMAP_DELTA, 0.0f, 0.0f, 0.0f)).r;
	height2[2] = tex2D(g_samDisplacementMap2, float4(texCoord2, 0.0f, 0.0f)+float4(0.0f, DISPLACEMENTMAP_DELTA, 0.0f, 0.0f)).r;
	height2[3] = tex2D(g_samDisplacementMap2, float4(texCoord2, 0.0f, 0.0f)+float4(DISPLACEMENTMAP_DELTA, DISPLACEMENTMAP_DELTA, 0.0f, 0.0f)).r;
	
	// Filter displacement map:
	float h2 = lerp( lerp( height2[0], height2[1], lerps.x ),
                     lerp( height2[2], height2[3], lerps.x ),
                     lerps.y );
                   


	// Sum and scale the sampled heights.  
   float result = (g_scaleHeights.x * h1 + g_scaleHeights.y * h2) / 480;
   return float4(result, result,result, 1);
}
technique flatTech
{
    pass P0
    {
        // Specify the vertex and pixel shader associated with this pass.
        vertexShader = compile vs_3_0 flatVertexEntry();
        pixelShader  = compile ps_3_0 flatPixelEntry();
    }    
}

/*------------------------------------------------------------
	Perlin2D.h -- achieve perlin 2d noise
			(c) Seamanj.2013/9/4
------------------------------------------------------------*/
#pragma once
#include <vector>
#include <windows.h>

class Perlin2D
{
public:

	Perlin2D( int octaves,      // how many octaves to blend
		    int maxWaveLength,  // wavelength of the lowest-frequency octave (the longest one)
			float maxAmplitude, // amplitude of the lowest-frequency octave (the largest one)
			float falloff)      // factor by which the amplitude shrinks each time the frequency doubles
			: m_iOctaves(octaves), m_iMaxWaveLenght(maxWaveLength),
			  m_fMaxAmplitude(maxAmplitude), m_fFallOff(falloff)
	{}
	~Perlin2D()
	{}
	void GenPerlin2D(int width, int height);
	void CopyPerlin2DToBuffer(int width, int height, float* pBuffer)
	{
		GenPerlin2D(width, height);
		std::copy(m_vecData.begin(), m_vecData.end(),pBuffer);
	}
private:
	float _getRandomFloat(float lowBound, float highBound)
	{
		if( lowBound >= highBound ) // bad input
		return lowBound;

		// get random float in [0, 1] interval
		float f = (rand() % 10000) * 0.0001f; 

		// return float in [lowBound, highBound] interval. 
		return (f * (highBound - lowBound)) + lowBound; 
	}
	// Cosine interpolation.
	// The closer the sample point is to a, the smaller x is; x ∈ [0,1].
	float _interpolation(float a, float b, float x)
	{
		float ft = x * 3.1415927f;
		float f = (1 - cos(ft)) * 0.5f;
		return a*(1-f) + b*f;
	}
	std::vector<float> m_vecData;
	int m_iOctaves;
	int m_iMaxWaveLenght;
	float m_fMaxAmplitude;
	float m_fFallOff;
};
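
For reference, here is a minimal standalone usage sketch of this class (my own, mirroring how OnD3D9CreateDevice fills the displacement textures):

#include <vector>
#include "Perlin2D.h"

// Fill a 129x129 height field, the same grid size the demo uses.
void fillHeightField()
{
	const int width = 129, height = 129;
	std::vector<float> heights(width * height);
	Perlin2D noise(4, 32, 256.0f, 0.5f); // octaves, longest wavelength, largest amplitude, falloff
	noise.CopyPerlin2DToBuffer(width, height, &heights[0]);
	// heights[] now holds values roughly in [0, 480];
	// divide by 480 to normalize, as the shader does.
}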

/*------------------------------------------------------------
Perlin2D.cpp -- achieve perlin 2d noise
(c) Seamanj.2013/9/4
------------------------------------------------------------*/
#include "DXUT.h"
#include "Perlin2D.h"
//Perlin2D myPerlinNoise(4, 32, 256, 0.5);
void Perlin2D::GenPerlin2D(int width, int height)
{
	std::vector<float> vecData(width * height, 0);
	m_vecData.clear();
	m_vecData.resize(width * height, 0);
	srand(GetTickCount());
	int waveLength = m_iMaxWaveLenght;
	float amplitude = m_fMaxAmplitude;
	// k goes from 0 upwards, so the octave frequency keeps increasing.
	// 256 + 128 + 64 + 32 = 480,
	// so the accumulated values lie in the range 0 ~ 480.
	for(int k = 0; k < m_iOctaves; k++)
	{
		// Build a discrete 2D noise grid with lattice spacing waveLength.
		for(int i = 0; i < height; i += waveLength)
		{
			// Assign a random value in [0, amplitude] to every lattice point.
			for(int j = 0; j < width; j += waveLength)
				vecData[i * width + j] = _getRandomFloat(0, amplitude);
		}
		// Smooth the lattice values.
		// For waveLength = 32: i runs over 32, 64, 96 ( < 129 - 32 = 97 ).
		for(int i = waveLength; i < height - waveLength; i += waveLength)
		{
			for(int j = waveLength; j < width - waveLength; j += waveLength)
			{
				int iLeftUp   = (i - waveLength) * width + (j - waveLength);
				int iLeftLow  = (i + waveLength) * width + (j - waveLength);
				int iRightUp  = (i - waveLength) * width + (j + waveLength);
				int iRightLow = (i + waveLength) * width + (j + waveLength);

				float corners = ( vecData[iLeftUp] + vecData[iLeftLow] +
					vecData[iRightUp] + vecData[iRightLow]) / 24;

				int iLeft  = i * width + (j - waveLength);
				int iLower = (i + waveLength) * width + j;
				int iRight = i * width + (j + waveLength);
				int iUp    = (i - waveLength) * width + j;
				float sides = ( vecData[iLeft] + vecData[iLower] +
					vecData[iRight] + vecData[iUp]) / 12;
				float center = vecData[i * width + j] / 2;
				vecData[i * width + j] = corners + sides + center;
			}
		}
		// Interpolate to get a continuous 2D noise grid (spacing 1).
		for(int i = 0; i < height; i++)
		{
			for(int j = 0; j < width; j++)
			{
				// If both i and j are divisible by waveLength, the point is a known lattice point.
				int iModeLength = i % waveLength;
				int jModeLength = j % waveLength;
				// Neither i nor j is divisible by waveLength.
				if( iModeLength && jModeLength)
				{
					float fLeftUp, fLeftLow, fRightUp, fRightLow;
					fLeftUp   = vecData[ (i - iModeLength) * width + (j - jModeLength) ];
					fRightUp  = vecData[ (i - iModeLength) * width + (j - jModeLength + waveLength) ];
					fLeftLow  = vecData[ (i - iModeLength + waveLength) * width + (j - jModeLength) ];
					fRightLow = vecData[ (i - iModeLength + waveLength) * width + (j - jModeLength + waveLength) ];
					float x = (float)iModeLength / (float)waveLength;
					float y = (float)jModeLength / (float)waveLength;
					float y1 = _interpolation(fLeftUp, fLeftLow, x);
					float y2 = _interpolation(fRightUp, fRightLow, x);
					vecData[i * width + j] = _interpolation(y1, y2, y);
				}
				// Only i is not divisible by waveLength.
				else if(iModeLength)
				{
					float x = (float)iModeLength / (float)waveLength;
					vecData[i * width + j] = _interpolation( vecData[(i - iModeLength) * width + j],
						vecData[(i - iModeLength + waveLength) * width + j], x );
				}
				// Only j is not divisible by waveLength.
				else if(jModeLength)
				{
					float y = (float)jModeLength / (float)waveLength;
					vecData[i * width + j] = _interpolation( vecData[i * width + (j - jModeLength)],
						vecData[i * width + (j - jModeLength + waveLength)], y );
				}
				// Accumulate this octave into the final noise.
				m_vecData[i * width + j] += vecData[i * width + j];
			}
		}
		// Halve the wavelength and the amplitude, ready for the next octave.
		waveLength = waveLength >> 1; // wavelength halves
		// The lattice spacings 32, 16, 8, 4 act like the period of each octave,
		// with interpolation in between. In 1D terms: if the noise function is
		// noise(x), then noise(2x), noise(4x), ... give higher-frequency noise;
		// think of noise as a sin function and it is the same idea.
		amplitude *= m_fFallOff;
	}
}
