【WebGL】 Creating 3D
Posted: 30 Mar 2021 08:05 PM PDT
<canvas id="c" width=1920 height=1280></canvas>
main();

function main() {
  /*========== Create a WebGL Context ==========*/
  const canvas = document.querySelector("#c");
  const gl = canvas.getContext('webgl');
  if (!gl) {
    // Bail out early: every call below needs a valid context.
    console.log('WebGL unavailable');
    return;
  }
  console.log('WebGL is good to go');
  /*========== Define and Store the Geometry ==========*/
  // Three faces of a box, two triangles (6 vertices) per face,
  // given directly as clip-space (x, y, z) coordinates:
  const squares = [
    // front face
    -0.3, -0.3, -0.3,
     0.3, -0.3, -0.3,
     0.3,  0.3, -0.3,
    -0.3, -0.3, -0.3,
    -0.3,  0.3, -0.3,
     0.3,  0.3, -0.3,
    // back face (shifted up and to the right to suggest depth)
    -0.2, -0.2,  0.3,
     0.4, -0.2,  0.3,
     0.4,  0.4,  0.3,
    -0.2, -0.2,  0.3,
    -0.2,  0.4,  0.3,
     0.4,  0.4,  0.3,
    // top face (joins the front and back top edges)
    -0.3,  0.3, -0.3,
     0.3,  0.3, -0.3,
    -0.2,  0.4,  0.3,
     0.4,  0.4,  0.3,
     0.3,  0.3, -0.3,
    -0.2,  0.4,  0.3,
  ];

  // Upload the positions to a GPU buffer:
  const origBuffer = gl.createBuffer();
  gl.bindBuffer(gl.ARRAY_BUFFER, origBuffer);
  gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(squares), gl.STATIC_DRAW);
  // One RGBA color per vertex, in the same order as the positions above:
  const squareColors = [
    // front face: blue
    0.0, 0.0, 1.0, 1.0,
    0.0, 0.0, 1.0, 1.0,
    0.0, 0.0, 1.0, 1.0,
    0.0, 0.0, 1.0, 1.0,
    0.0, 0.0, 1.0, 1.0,
    0.0, 0.0, 1.0, 1.0,
    // back face: red
    1.0, 0.0, 0.0, 1.0,
    1.0, 0.0, 0.0, 1.0,
    1.0, 0.0, 0.0, 1.0,
    1.0, 0.0, 0.0, 1.0,
    1.0, 0.0, 0.0, 1.0,
    1.0, 0.0, 0.0, 1.0,
    // top face: green
    0.0, 1.0, 0.0, 1.0,
    0.0, 1.0, 0.0, 1.0,
    0.0, 1.0, 0.0, 1.0,
    0.0, 1.0, 0.0, 1.0,
    0.0, 1.0, 0.0, 1.0,
    0.0, 1.0, 0.0, 1.0,
  ];

  const colorBuffer = gl.createBuffer();
  gl.bindBuffer(gl.ARRAY_BUFFER, colorBuffer);
  gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(squareColors), gl.STATIC_DRAW);
  /*========== Shaders ==========*/
  const vsSource = `
    attribute vec4 aPosition;
    attribute vec4 aVertexColor;
    varying lowp vec4 vColor;
    void main() {
      gl_Position = aPosition;
      vColor = aVertexColor;
    }
  `;
  const fsSource = `
    varying lowp vec4 vColor;
    void main() {
      gl_FragColor = vColor;
    }
  `;
  // create shaders
  const vertexShader = gl.createShader(gl.VERTEX_SHADER);
  const fragmentShader = gl.createShader(gl.FRAGMENT_SHADER);
  gl.shaderSource(vertexShader, vsSource);
  gl.shaderSource(fragmentShader, fsSource);

  // compile shaders
  gl.compileShader(vertexShader);
  if (!gl.getShaderParameter(vertexShader, gl.COMPILE_STATUS)) {
    alert('An error occurred compiling the shaders: ' + gl.getShaderInfoLog(vertexShader));
    gl.deleteShader(vertexShader);
    return null;
  }
  gl.compileShader(fragmentShader);
  if (!gl.getShaderParameter(fragmentShader, gl.COMPILE_STATUS)) {
    alert('An error occurred compiling the shaders: ' + gl.getShaderInfoLog(fragmentShader));
    gl.deleteShader(fragmentShader);
    return null;
  }
  // create program
  const program = gl.createProgram();
  gl.attachShader(program, vertexShader);
  gl.attachShader(program, fragmentShader);

  // link program
  gl.linkProgram(program);
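  // Not in the original post: checking the link status mirrors the
  // compile-status checks above and surfaces linker errors early.
  if (!gl.getProgramParameter(program, gl.LINK_STATUS)) {
    alert('An error occurred linking the program: ' + gl.getProgramInfoLog(program));
    gl.deleteProgram(program);
    return null;
  }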
  gl.useProgram(program);

  /*========== Connect the attributes with the vertex shader ==========*/
  // Positions: 3 floats per vertex, tightly packed:
  const posAttribLocation = gl.getAttribLocation(program, "aPosition");
  gl.bindBuffer(gl.ARRAY_BUFFER, origBuffer);
  gl.vertexAttribPointer(posAttribLocation, 3, gl.FLOAT, false, 0, 0);
  gl.enableVertexAttribArray(posAttribLocation);

  // Colors: 4 floats (RGBA) per vertex, tightly packed:
  const colorAttribLocation = gl.getAttribLocation(program, "aVertexColor");
  gl.bindBuffer(gl.ARRAY_BUFFER, colorBuffer);
  gl.vertexAttribPointer(colorAttribLocation, 4, gl.FLOAT, false, 0, 0);
  gl.enableVertexAttribArray(colorAttribLocation);
  /*========== Drawing ==========*/
  gl.clearColor(1, 1, 1, 1);
  gl.enable(gl.DEPTH_TEST); // the default depth function (gl.LESS) keeps the nearer fragment
  gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

  // Draw the vertices as triangles: 18 vertices = 3 faces x 2 triangles x 3 vertices
  const mode = gl.TRIANGLES;
  const first = 0;
  const count = 18;
  gl.drawArrays(mode, first, count);
}
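Note that the vertex shader above writes aPosition straight to gl_Position, so the coordinates are already in clip space and the 3D impression comes only from the hand-offset back and top faces. A perspective effect would normally be applied through a projection matrix uniform instead; the sketch below shows the idea (uPMatrix is an illustrative name, not from the original post, and the identity matrix shown changes nothing on its own):

// Sketch only, not part of the original post:
const vsSourceProjected = `
  attribute vec4 aPosition;
  attribute vec4 aVertexColor;
  uniform mat4 uPMatrix;   // projection matrix supplied from JavaScript
  varying lowp vec4 vColor;
  void main() {
    gl_Position = uPMatrix * aPosition;
    vColor = aVertexColor;
  }
`;

// After linking a program built from this shader, upload the matrix
// (an identity here; a real one would come from a library such as glMatrix):
// const pLocation = gl.getUniformLocation(program, "uPMatrix");
// gl.uniformMatrix4fv(pLocation, false, new Float32Array([
//   1, 0, 0, 0,
//   0, 1, 0, 0,
//   0, 0, 1, 0,
//   0, 0, 0, 1,
// ]));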
canvas {
width: 100vw;
height: 100vh;
}
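One sizing note: this CSS stretches the canvas's fixed 1920x1280 drawing buffer to whatever the viewport happens to be, which can distort the squares. A common fix, sketched below (the helper name is illustrative, not from the original post), is to resize the drawing buffer to the displayed size before drawing:

// Illustrative helper: keep the drawing buffer in sync with the CSS size.
function resizeCanvasToDisplaySize(canvas) {
  const width = canvas.clientWidth;
  const height = canvas.clientHeight;
  if (canvas.width !== width || canvas.height !== height) {
    canvas.width = width;
    canvas.height = height;
  }
}

// Inside main(), before drawing:
// resizeCanvasToDisplaySize(canvas);
// gl.viewport(0, 0, gl.drawingBufferWidth, gl.drawingBufferHeight);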
【PYTHON OPENCV】Handwritten digits recognition using KNN and raw pixels as features and varying both k and the number of training/testing images
Posted: 30 Mar 2021 07:55 PM PDT
"""
Handwritten digits recognition using KNN and raw pixels as features and varying both k and the number of
training/testing images
"""
# Import required packages:
import cv2
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
# Constants:
SIZE_IMAGE = 20
NUMBER_CLASSES = 10
def load_digits_and_labels(big_image):
    """Returns all the digits from the 'big' image and creates the corresponding labels for each image"""

    # Load the 'big' image containing all the digits
    # (OpenCV ships it as samples/data/digits.png: 5,000 digits of 20x20 pixels, 500 per class):
    digits_img = cv2.imread(big_image, 0)

    # Get all the digit images from the 'big' image
    # (integer division so np.vsplit/np.hsplit receive whole section counts):
    number_rows = digits_img.shape[1] // SIZE_IMAGE
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)

    digits = []
    for row in rows:
        row_cells = np.hsplit(row, number_rows)
        for digit in row_cells:
            digits.append(digit)
    digits = np.array(digits)

    # Create the labels for each image (int32 so OpenCV's ml module accepts them as responses):
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES).astype(np.int32)
    return digits, labels
def get_accuracy(predictions, labels):
    """Returns the accuracy based on the coincidences between predictions and labels"""

    accuracy = (np.squeeze(predictions) == labels).mean()
    return accuracy * 100


def raw_pixels(img):
    """Returns the raw pixels as features from the image (a 400-dimensional vector for a 20x20 digit)"""

    return img.flatten()
# Load all the digits and the corresponding labels:
digits, labels = load_digits_and_labels('digits.png')
# Shuffle data
# Constructs a random number generator:
rand = np.random.RandomState(1234)
# Randomly permute the sequence:
shuffle = rand.permutation(len(digits))
digits, labels = digits[shuffle], labels[shuffle]
# Compute the descriptors for all the images.
# In this case, the raw pixels are the feature descriptors
raw_descriptors = []
for img in digits:
    raw_descriptors.append(np.float32(raw_pixels(img)))
raw_descriptors = np.squeeze(raw_descriptors)
# Split data into training/testing:
split_values = np.arange(0.1, 1, 0.1)
# Create a dictionary to store the accuracy when testing:
results = defaultdict(list)
# Create KNN:
knn = cv2.ml.KNearest_create()
for split_value in split_values:
    # Split the data into training and testing:
    partition = int(split_value * len(raw_descriptors))
    raw_descriptors_train, raw_descriptors_test = np.split(raw_descriptors, [partition])
    labels_train, labels_test = np.split(labels, [partition])

    # Train the KNN model:
    print('Training KNN model - raw pixels as features')
    knn.train(raw_descriptors_train, cv2.ml.ROW_SAMPLE, labels_train)

    # Store the accuracy when testing:
    for k in np.arange(1, 10):
        ret, result, neighbours, dist = knn.findNearest(raw_descriptors_test, k)
        acc = get_accuracy(result, labels_test)
        print(" {:.2f}".format(acc))
        results[int(split_value * 100)].append(acc)
# Show all results using matplotlib capabilities:
# Create the dimensions of the figure and set title:
fig = plt.figure(figsize=(12, 5))
plt.suptitle("k-NN handwritten digits recognition", fontsize=14, fontweight='bold')
fig.patch.set_facecolor('silver')
ax = plt.subplot(1, 1, 1)
ax.set_xlim(0, 10)
dim = np.arange(1, 10)
for key in results:
    ax.plot(dim, results[key], linestyle='--', marker='o', label=str(key) + "%")
plt.legend(loc='upper left', title="% training")
plt.title('Accuracy of the KNN model varying both k and the percentage of images to train/test')
plt.xlabel("k")
plt.ylabel("accuracy")
plt.show()
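Since results now holds nine accuracies (k = 1 to 9) for each training percentage, a short follow-up can report the best combination found. The snippet below is an addition to this post, not part of the original script:

# Follow-up (not in the original script): report the best (training %, k) pair.
best_split, best_k, best_acc = max(
    ((split, k, acc)
     for split, accs in results.items()
     for k, acc in zip(range(1, 10), accs)),
    key=lambda t: t[2])
print("Best accuracy: {:.2f}% ({}% training data, k={})".format(best_acc, best_split, best_k))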