技術分享教學

aws s3 multipart upload + html5 file api chunk + php server side code

這個問題困擾我好久, 後來找到html5 chunk file + php server side 的解法!

稍微改了一下, 加上aws s3 multipart upload

根據aws 文件中的說明, aws s3 multipart 的檔案, 最小是5M , 所以demo中我用10MB來跑

1.13G 切 10M part 上傳
280秒上傳完113個part
Ec2 local merge + aws multipart merge 是80秒左右
約莫5分鐘內可以傳完!

其中html5 中的file api , 要實作chunk file , 用的是slice 。
不過chrome 跟firefox 有些略不同!

目前此demo 在IE 9~11 上, Firefox latest, chrome latest 上都可測試成功!

<?php
// merge-aws.php — merges the uploaded chunk files into one local file while
// streaming each chunk to S3 as a multipart-upload part, then completes the
// multipart upload (or aborts it on failure so S3 does not keep orphan parts).
// Include the AWS SDK using the Composer autoloader
require '/var/www/vendor/autoload.php';

use Aws\S3\S3Client;
// Was missing in the original: without this import, `catch (S3Exception $e)`
// referred to a nonexistent global class and never matched.
use Aws\S3\Exception\S3Exception;

define('AWS_KEY', 'xxxxxxx');
define('AWS_SECRET', 'xxxxxxx');
define('BUCKET_NAME', 'xxxxxx');
define('AWS_REGION', 'xxxxxxx');

// name must be present and in proper format (no '/', no leading dot)
if (!isset($_REQUEST['name'])) {
    throw new Exception('Name required');
}
if (!preg_match('/^[-a-z0-9_][-a-z0-9_.]*$/i', $_REQUEST['name'])) {
    throw new Exception('Name error');
}
// index must be set, and a number (the count of chunks the client sent)
if (!isset($_REQUEST['index'])) {
    throw new Exception('Index required');
}
if (!preg_match('/^[0-9]+$/', $_REQUEST['index'])) {
    throw new Exception('Index error');
}

$bucket   = BUCKET_NAME;
$keyname  = $_REQUEST['name'];
$filename = $keyname;
$chunks   = (int) $_REQUEST['index'];

// 1. Instantiate the client (AWS SDK v2 factory style, as used by this file).
$s3 = S3Client::factory(array(
    'key'    => AWS_KEY,
    'secret' => AWS_SECRET,
    'region' => AWS_REGION
));

// 2. Create a new multipart upload and get the upload ID.
$result = $s3->createMultipartUpload(array(
    'Bucket'       => BUCKET_NAME,
    'Key'          => $keyname,
    'StorageClass' => 'REDUCED_REDUNDANCY',
    'ACL'          => 'public-read',
    'Metadata'     => array(
        'param1' => 'value 1',
        'param2' => 'value 2',
        'param3' => 'value 3'
    )
));
$uploadId = $result['UploadId'];

$completed = false;
try {
    // 3. Local merge + per-part S3 upload in a single pass.
    $target = "uploads/full_" . $keyname;
    $dst = fopen($target, 'wb');
    $parts = array();
    for ($i = 0; $i < $chunks; $i++) {
        $slice = 'uploads/' . $keyname . '/' . $keyname . '-' . $i;
        // Read each chunk once and reuse the bytes for both the local merge
        // and the S3 part body (the original opened the file twice and never
        // closed the second handle).
        $body = file_get_contents($slice);
        if ($body === false) {
            throw new Exception("Cannot read chunk {$slice}");
        }
        fwrite($dst, $body);
        $partNumber = $i + 1; // S3 part numbers are 1-based
        $result = $s3->uploadPart(array(
            'Bucket'     => $bucket,
            'Key'        => $keyname,
            'UploadId'   => $uploadId,
            'PartNumber' => $partNumber,
            'Body'       => $body
        ));
        $parts[] = array(
            'PartNumber' => $partNumber,
            'ETag'       => $result['ETag'],
        );
        // Original echoed the already-incremented counter (off by one).
        echo "Uploading part {$partNumber} of {$filename}.\n";
        unlink($slice);
    }
    fclose($dst);

    // 4. Complete the multipart upload. Moved inside the try block: the
    // original ran this unconditionally, even after aborting in the catch.
    $result = $s3->completeMultipartUpload(array(
        'Bucket'   => $bucket,
        'Key'      => $keyname,
        'UploadId' => $uploadId,
        'Parts'    => $parts,
    ));
    $completed = true;
    $url = $result['Location'];
    echo "Uploaded {$filename} to {$url}.\n";
} catch (S3Exception $e) {
    // Abort so S3 does not retain (and bill for) the uploaded parts.
    $s3->abortMultipartUpload(array(
        'Bucket'   => $bucket,
        'Key'      => $keyname,
        'UploadId' => $uploadId
    ));
    echo "Upload of {$filename} failed.\n";
}

if ($completed) {
    // Local merge finish: replace the chunk directory with the merged file.
    rmdir("uploads/" . $keyname);
    copy("uploads/full_" . $keyname, "uploads/" . $keyname);
    unlink("uploads/full_" . $keyname);
}
// Close the PHP block — the original fell straight into the <script> markup
// below without a closing tag, which is a parse error in a combined file.
?>
<script type="text/javascript">
const BYTES_PER_CHUNK = 10 * 1024 * 1024; // 10 MB per chunk (matches the server-side part size; comment previously said 1MB)
var slices; // outstanding slice count, decremented as each upload finishes
var slicesTotal; // total number of slices, constant once calculated
/**
 * Splits the selected file into BYTES_PER_CHUNK slices and uploads each one
 * via uploadFile(). Initialises the module-level `slices` / `slicesTotal`
 * counters that the upload callbacks use to detect completion.
 **/
function sendRequest() {
    var blob = document.getElementById('fileToUpload').files[0];
    if (!blob) {
        return; // no file selected — original threw on blob.size
    }
    var start = 0;
    var end;
    var index = 0;
    // calculate the number of slices
    slices = Math.ceil(blob.size / BYTES_PER_CHUNK);
    slicesTotal = slices;
    while (start < blob.size) {
        // clamp the last slice to the end of the file
        end = Math.min(start + BYTES_PER_CHUNK, blob.size);
        uploadFile(blob, index, start, end);
        start = end;
        index++;
    }
}
/**
 * Converts a chunk to an ArrayBuffer and hands it to `callback`
 * (needed e.g. on Android 4.0.4, whose XHR sends an empty body for Blobs).
 *
 * @param str      data to wrap (string or Blob-compatible value)
 * @param callback receives the resulting ArrayBuffer
 **/
var str2ab_blobreader = function(str, callback) {
    var blob;
    // `var` was missing in the original — the assignment leaked an implicit
    // global (and throws a ReferenceError in strict mode).
    var BlobBuilder = window.MozBlobBuilder || window.WebKitBlobBuilder || window.BlobBuilder;
    if (typeof(BlobBuilder) !== 'undefined') {
        // legacy builder path for old WebKit/Gecko
        var bb = new BlobBuilder();
        bb.append(str);
        blob = bb.getBlob();
    } else {
        blob = new Blob([str]);
    }
    var f = new FileReader();
    f.onload = function(e) {
        callback(e.target.result);
    };
    f.readAsArrayBuffer(blob);
};
/**
 * Uploads one slice [start, end) of `blob` to upload.php and updates the
 * progress bar. When the last outstanding slice finishes, calls mergeFile()
 * so the server assembles the parts.
 *
 * @param blob  the File being uploaded
 * @param index zero-based slice number (sent as the X-Index header)
 * @param start byte offset where the slice begins
 * @param end   byte offset one past the slice end
 */
function uploadFile(blob, index, start, end) {
    var chunk;
    var xhr = new XMLHttpRequest();
    // NOTE: the original also declared `var end;`, shadow-redeclaring the
    // parameter — removed.
    xhr.onreadystatechange = function() {
        if (xhr.readyState == 4) {
            if (xhr.responseText) {
                alert(xhr.responseText);
            }
            slices--;
            // if we have finished all slices, ask the server to merge
            if (slices == 0) {
                mergeFile(blob);
            }
        }
    };
    // Chrome (old WebKit), Firefox, and standard browsers expose slice()
    // under different names.
    if (blob.webkitSlice) {
        chunk = blob.webkitSlice(start, end);
    } else if (blob.mozSlice) {
        chunk = blob.mozSlice(start, end);
    } else {
        chunk = blob.slice(start, end);
    }
    xhr.addEventListener("load", function (evt) {
        var percentageDiv = document.getElementById("percent");
        var progressBar = document.getElementById("progressBar");
        progressBar.max = progressBar.value = 100;
        percentageDiv.innerHTML = "100%";
    }, false);
    xhr.upload.addEventListener("progress", function (evt) {
        var percentageDiv = document.getElementById("percent");
        var progressBar = document.getElementById("progressBar");
        if (evt.lengthComputable) {
            // coarse progress: based on the slice index, not bytes sent
            progressBar.max = slicesTotal;
            progressBar.value = index;
            percentageDiv.innerHTML = Math.round(index/slicesTotal * 100) + "%";
        }
    }, false);
    xhr.open("post", "upload.php", true);
    xhr.setRequestHeader("X-File-Name", blob.name); // custom header with filename and full size
    xhr.setRequestHeader("X-File-Size", blob.size);
    xhr.setRequestHeader("X-Index", index); // part identifier
    if (blob.webkitSlice) { // android default browser in version 4.0.4 has webkitSlice instead of slice()
        // str2ab_blobreader delivers the buffer via callback (its return value
        // is undefined — the original uselessly stored it in `buffer`).
        str2ab_blobreader(chunk, function(buf) { // we cannot send a blob, because body payload will be empty
            xhr.send(buf); // thats why we send an ArrayBuffer
        });
    } else {
        xhr.send(chunk); // but if we support slice() everything should be ok
    }
}
/**
 * Runs once every slice has been sent ("TO MERGE THEM ALL!"): POSTs the
 * filename and the total slice count to merge-aws.php so the server can
 * merge the chunks and push the result to S3.
 **/
function mergeFile(blob) {
    var payload = new FormData();
    payload.append("name", blob.name);
    payload.append("index", slicesTotal);
    var request = new XMLHttpRequest();
    request.open("POST", "merge-aws.php", true);
    request.send(payload);
}
</script>
<div id="percent">Waiting...</div>
<input type="file" name="file" id="fileToUpload"> <button onclick="sendRequest()">Send</button>
<progress id="progressBar" value="0" max="100"></progress>
<?php
// upload.php — receives one raw file chunk in the request body and stores it
// as uploads/<name>/<name>-<index> for later merging.

// "maxdb_execute_time" in the original was a typo for max_execution_time,
// so large chunks could still hit the default 30s limit.
ini_set("max_execution_time", 0);
ini_set("max_input_time", 0);

// Validate the headers BEFORE reading them (the original assigned them
// first, raising undefined-index notices when absent).
// name must be in proper format
if (!isset($_SERVER['HTTP_X_FILE_NAME'])) {
    throw new Exception('Name required');
}
//only for demo test so simple file name rule
if (!preg_match('/^[-a-z0-9_][-a-z0-9_.]*$/i', $_SERVER['HTTP_X_FILE_NAME'])) {
    throw new Exception('Name error');
}
// index must be set, and number
if (!isset($_SERVER['HTTP_X_INDEX'])) {
    throw new Exception('Index required');
}
if (!preg_match('/^[0-9]+$/', $_SERVER['HTTP_X_INDEX'])) {
    throw new Exception('Index error');
}

$filename = $_SERVER['HTTP_X_FILE_NAME'];
// NOTE(review): X-File-Size is assumed to be sent by the client (see the JS
// uploader); it is stored but not validated here — confirm if ever used.
$filesize = $_SERVER['HTTP_X_FILE_SIZE'];
$index    = $_SERVER['HTTP_X_INDEX'];

// we store chunks in a directory named after the filename
$dir = "uploads/" . $filename . '/';
if (!file_exists($dir)) {
    mkdir($dir);
}

// Stream the raw request body straight to disk; 'rb' for binary safety,
// and close the handle (the original leaked it).
$target = $dir . $filename . '-' . $index;
$input = fopen("php://input", "rb");
file_put_contents($target, $input);
fclose($input);