const fs = require('fs');
const path = require('path');
const zlib = require('zlib');

/**
 * Entry types in PAK files.
 */
const EntryType = {
    Directory: 0,
    File: 1
};

/**
 * Compression types for PAK entries.
 */
const CompressionType = {
    None: 0,
    Zlib: 0x106
};

/**
 * Resolve an archive entry name against a destination directory, rejecting
 * names that would escape it ("zip slip" path traversal). Entry names come
 * from the archive and must be treated as untrusted input.
 *
 * @param {string} dstDir - Extraction root directory.
 * @param {string} entryName - Entry name taken from the archive.
 * @returns {string|null} Absolute destination path, or null if it escapes dstDir.
 */
function safeDestinationPath(dstDir, entryName) {
    const root = path.resolve(dstDir);
    const destPath = path.resolve(root, entryName);
    if (destPath !== root && !destPath.startsWith(root + path.sep)) {
        return null;
    }
    return destPath;
}

/**
 * Represents a file entry in a PAK archive.
 */
class PakEntryFile {
    /**
     * @param {string} entryName - Full (directory-prefixed) entry name.
     * @param {PakReader} reader - Reader positioned at the entry's metadata record.
     */
    constructor(entryName, reader) {
        this.name = entryName;
        this.offset = reader.readInt32LE();        // payload offset within the archive
        this.size = reader.readInt32LE();          // stored (possibly compressed) size
        this.originalSize = reader.readInt32LE();  // uncompressed size
        reader.skip(4);                            // unknown/reserved 4 bytes
        this.compression = reader.readInt32BE();   // CompressionType value (big endian)
        this.unknown = reader.readBytes(4);        // trailing unknown bytes, preserved as-is
    }
}

/**
 * Binary reader for PAK files with bounds-checked helper methods.
 */
class PakReader {
    /**
     * @param {Buffer} buffer - Entire PAK file contents.
     */
    constructor(buffer) {
        this.buffer = buffer;
        this.position = 0;
    }

    /**
     * @param {number} length - Number of bytes that would be consumed.
     * @returns {boolean} True if `length` bytes are available at the current position.
     */
    canRead(length) {
        return length >= 0 && this.position + length <= this.buffer.length;
    }

    /**
     * Read `count` bytes and advance the position.
     * @param {number} count
     * @returns {Buffer} A view onto the underlying buffer (no copy).
     * @throws {Error} If fewer than `count` bytes remain.
     */
    readBytes(count) {
        if (!this.canRead(count)) {
            throw new Error(`Cannot read ${count} bytes at position ${this.position}`);
        }
        // subarray() replaces the deprecated Buffer.slice(); both return a view.
        const bytes = this.buffer.subarray(this.position, this.position + count);
        this.position += count;
        return bytes;
    }

    /**
     * @returns {number} The next single byte.
     * @throws {Error} If no byte remains.
     */
    readByte() {
        return this.readBytes(1)[0];
    }

    /**
     * Read a little-endian Int32. Returns 0 (with a warning) on truncated data
     * so header parsing can degrade gracefully instead of throwing.
     * @returns {number}
     */
    readInt32LE() {
        if (!this.canRead(4)) {
            console.warn('Warning: Not enough data to read 4 bytes for an Int32.');
            return 0;
        }
        const value = this.buffer.readInt32LE(this.position);
        this.position += 4;
        return value;
    }

    /**
     * Read a big-endian Int32. Returns 0 (with a warning) on truncated data.
     * @returns {number}
     */
    readInt32BE() {
        if (!this.canRead(4)) {
            console.warn('Warning: Not enough data to read 4 bytes for an Int32.');
            return 0;
        }
        const value = this.buffer.readInt32BE(this.position);
        this.position += 4;
        return value;
    }

    /**
     * Read a UTF-8 string of `length` bytes. Returns '' (with a warning) for
     * non-positive lengths or truncated data.
     * @param {number} length
     * @returns {string}
     */
    readStringUtf8(length) {
        if (length <= 0) {
            console.warn('Warning: Attempted to read a string with non-positive length.');
            return '';
        }

        if (!this.canRead(length)) {
            console.warn(`Warning: Not enough data to read a string of length ${length}.`);
            return '';
        }

        try {
            const bytes = this.readBytes(length);
            return bytes.toString('utf8');
        } catch (ex) {
            console.error(`Error while reading the string: ${ex.message}`);
            return 'InvalidEntry';
        }
    }

    /**
     * Advance the position by `count` bytes; clamps to the end of the buffer
     * (with a warning) when not enough data remains.
     * @param {number} count
     */
    skip(count) {
        if (this.canRead(count)) {
            this.position += count;
        } else {
            console.warn(`Warning: Attempt to skip ${count} bytes, but not enough data remains.`);
            this.position = this.buffer.length;
        }
    }

    /**
     * Skip a 4-byte block signature (e.g. "FORM", "HEAD", "DATA").
     */
    skipSignature() {
        if (this.canRead(4)) {
            this.position += 4;
        } else {
            console.warn('Warning: Not enough data to skip the signature.');
            this.position = this.buffer.length;
        }
    }

    /** @returns {number} Current read position. */
    pos() {
        return this.position;
    }

    /**
     * Set the absolute read position.
     * @param {number} position
     * @throws {Error} If the position is outside the buffer.
     */
    seek(position) {
        if (position >= 0 && position <= this.buffer.length) {
            this.position = position;
        } else {
            throw new Error(`Invalid seek position: ${position}`);
        }
    }
}

/**
 * Main PAK unpacker class for Arma Reforger .pak files.
 */
class PakUnpacker {
    /**
     * @param {string} filePath - Path to the .pak file.
     */
    constructor(filePath) {
        this.filePath = filePath;
        this.formSize = 0;
        this.dataSize = 0;
        this.entriesSize = 0;
        this.entries = [];
        this.buffer = null;
    }

    /**
     * Load and parse the PAK file.
     * @returns {Promise<void>}
     */
    async load() {
        this.buffer = await fs.promises.readFile(this.filePath);
        const reader = new PakReader(this.buffer);

        this.readForm(reader);
        this.readHead(reader);
        this.readData(reader);
        this.readEntries(reader);
    }

    /**
     * Load and parse the PAK file synchronously (blocking).
     */
    loadSync() {
        this.buffer = fs.readFileSync(this.filePath);
        const reader = new PakReader(this.buffer);

        this.readForm(reader);
        this.readHead(reader);
        this.readData(reader);
        this.readEntries(reader);
    }

    /**
     * Parse the FORM block: signature + big-endian size + inner signature.
     * @param {PakReader} reader
     */
    readForm(reader) {
        reader.skipSignature();
        this.formSize = reader.readInt32BE();
        reader.skipSignature();
    }

    /**
     * Parse the HEAD block: signature + 32 header bytes (logged, not interpreted).
     * @param {PakReader} reader
     */
    readHead(reader) {
        reader.skipSignature();
        const headBytes = reader.readBytes(32);
        console.log('Head of the Pak File:', headBytes.toString('base64'));
    }

    /**
     * Parse the DATA block header and skip over the payload; file contents are
     * addressed later via each entry's offset/size.
     * @param {PakReader} reader
     */
    readData(reader) {
        reader.skipSignature();
        this.dataSize = reader.readInt32BE();
        reader.skip(this.dataSize);
    }

    /**
     * Parse the entries block: a sequence of directory/file records. On a
     * malformed record the reader is rewound and parsing stops, keeping the
     * entries collected so far.
     * @param {PakReader} reader
     */
    readEntries(reader) {
        reader.skipSignature();
        this.entriesSize = reader.readInt32BE();

        try {
            reader.skip(2); // unknown 2-byte field
            reader.skip(4); // unknown 4-byte field

            const posEntries = reader.pos();
            while (reader.pos() - posEntries < this.entriesSize) {
                const previousPos = reader.pos();

                try {
                    if (!reader.canRead(2)) break;

                    const entryType = reader.readByte();
                    const entryNameLength = reader.readByte();

                    if (!reader.canRead(entryNameLength)) break;

                    const entryName = reader.readStringUtf8(entryNameLength);

                    if (entryType === EntryType.Directory) {
                        this.readEntriesFromDirectory(entryName, reader);
                    } else {
                        this.entries.push(new PakEntryFile(entryName, reader));
                    }
                } catch (ex) {
                    console.error(`Error while reading an entry: ${ex.message}`);
                    reader.seek(previousPos);
                    break;
                }
            }
        } catch (ex) {
            console.error(`Error while processing the entries: ${ex.message}`);
        }
    }

    /**
     * Recursively parse a directory record: child count followed by child entries.
     * @param {string} dirName - Path prefix for all children.
     * @param {PakReader} reader
     */
    readEntriesFromDirectory(dirName, reader) {
        if (!reader.canRead(4)) {
            console.warn("Warning: Can't read directory entry as there is not enough data.");
            return;
        }

        const childCount = reader.readInt32LE();

        for (let i = 0; i < childCount; i++) {
            const previousPos = reader.pos();

            try {
                if (!reader.canRead(2)) break;

                const entryType = reader.readByte();
                const entryNameLength = reader.readByte();

                if (!reader.canRead(entryNameLength)) break;

                const entryName = path.join(dirName, reader.readStringUtf8(entryNameLength));

                if (entryType === EntryType.File) {
                    this.entries.push(new PakEntryFile(entryName, reader));
                } else if (entryType === EntryType.Directory) {
                    this.readEntriesFromDirectory(entryName, reader);
                } else {
                    console.warn('Unknown entry type.');
                }
            } catch (ex) {
                console.error(`Error while reading a directory entry: ${ex.message}`);
                reader.seek(previousPos);
                break;
            }
        }
    }

    /**
     * Extract all files to the destination directory. Entries with invalid
     * bounds or names that escape dstDir are skipped with a warning.
     * @param {string} dstDir - Destination directory.
     * @returns {Promise<void>}
     */
    async extract(dstDir) {
        await fs.promises.mkdir(dstDir, { recursive: true });

        for (const entry of this.entries) {
            try {
                // Guard against path traversal via crafted entry names.
                const destPath = safeDestinationPath(dstDir, entry.name);
                if (destPath === null) {
                    console.warn(`Warning: Entry '${entry.name}' escapes the destination directory. Skipping.`);
                    continue;
                }

                await fs.promises.mkdir(path.dirname(destPath), { recursive: true });

                if (entry.offset < 0 || entry.size < 0 || entry.offset + entry.size > this.buffer.length) {
                    console.warn(`Warning: Entry '${entry.name}' exceeds file size. Skipping.`);
                    continue;
                }

                const fileData = this.buffer.subarray(entry.offset, entry.offset + entry.size);

                if (entry.compression === CompressionType.Zlib) {
                    try {
                        const decompressed = await this.decompressZlib(fileData, entry.originalSize);
                        await fs.promises.writeFile(destPath, decompressed);
                        console.log(`Extracted (Zlib): ${entry.name}`);
                    } catch (ex) {
                        // Keep the raw payload next to the intended path for debugging.
                        console.error(`Error during Zlib decompression: ${entry.name}`, ex.message);
                        await fs.promises.writeFile(destPath + '_Error', fileData);
                    }
                } else {
                    await fs.promises.writeFile(destPath, fileData);
                    console.log(`Extracted: ${entry.name}`);
                }
            } catch (ex) {
                console.error(`Error while extracting a file (${entry.name}): ${ex.message}`);
            }
        }
    }

    /**
     * Extract all files synchronously (blocking). Same semantics as extract().
     * @param {string} dstDir - Destination directory.
     */
    extractSync(dstDir) {
        fs.mkdirSync(dstDir, { recursive: true });

        for (const entry of this.entries) {
            try {
                // Guard against path traversal via crafted entry names.
                const destPath = safeDestinationPath(dstDir, entry.name);
                if (destPath === null) {
                    console.warn(`Warning: Entry '${entry.name}' escapes the destination directory. Skipping.`);
                    continue;
                }

                fs.mkdirSync(path.dirname(destPath), { recursive: true });

                if (entry.offset < 0 || entry.size < 0 || entry.offset + entry.size > this.buffer.length) {
                    console.warn(`Warning: Entry '${entry.name}' exceeds file size. Skipping.`);
                    continue;
                }

                const fileData = this.buffer.subarray(entry.offset, entry.offset + entry.size);

                if (entry.compression === CompressionType.Zlib) {
                    try {
                        const decompressed = this.decompressZlibSync(fileData, entry.originalSize);
                        fs.writeFileSync(destPath, decompressed);
                        console.log(`Extracted (Zlib): ${entry.name}`);
                    } catch (ex) {
                        console.error(`Error during Zlib decompression: ${entry.name}`, ex.message);
                        fs.writeFileSync(destPath + '_Error', fileData);
                    }
                } else {
                    fs.writeFileSync(destPath, fileData);
                    console.log(`Extracted: ${entry.name}`);
                }
            } catch (ex) {
                console.error(`Error while extracting a file (${entry.name}): ${ex.message}`);
            }
        }
    }

    /**
     * Decompress Zlib data asynchronously.
     *
     * Prefers zlib.inflate(), which parses the 2-byte header AND verifies the
     * trailing Adler-32 checksum (the previous implementation stripped the
     * header and raw-inflated, leaving the checksum bytes unverified in the
     * stream). Falls back to raw deflate with the header stripped for streams
     * whose checksum is damaged or missing.
     *
     * @param {Buffer} data - Compressed data including the 2-byte Zlib header.
     * @param {number} expectedSize - Expected size after decompression.
     * @returns {Promise<Buffer>}
     */
    decompressZlib(data, expectedSize) {
        return new Promise((resolve, reject) => {
            if (data.length < 2) {
                return reject(new Error('The input data is too short for decompression.'));
            }

            if (data[0] !== 0x78) {
                return reject(new Error('Invalid Zlib header.'));
            }

            const checkSize = (result) => {
                if (result.length < expectedSize) {
                    console.warn(`Warning: Decompressed size (${result.length} bytes) is smaller than expected (${expectedSize} bytes).`);
                }
                return result;
            };

            zlib.inflate(data, (err, result) => {
                if (!err) {
                    return resolve(checkSize(result));
                }
                // Fallback: skip the 2-byte Zlib header and use raw deflate.
                zlib.inflateRaw(data.subarray(2), (rawErr, rawResult) => {
                    if (rawErr) {
                        return reject(rawErr);
                    }
                    resolve(checkSize(rawResult));
                });
            });
        });
    }

    /**
     * Decompress Zlib data synchronously. Same strategy as decompressZlib().
     * @param {Buffer} data - Compressed data including the 2-byte Zlib header.
     * @param {number} expectedSize - Expected size after decompression.
     * @returns {Buffer}
     * @throws {Error} If the data is too short, has no Zlib header, or cannot be inflated.
     */
    decompressZlibSync(data, expectedSize) {
        if (data.length < 2) {
            throw new Error('The input data is too short for decompression.');
        }

        if (data[0] !== 0x78) {
            throw new Error('Invalid Zlib header.');
        }

        let result;
        try {
            // Full zlib decoding: header parsed, Adler-32 verified.
            result = zlib.inflateSync(data);
        } catch (ex) {
            // Fallback: skip the 2-byte Zlib header and use raw deflate.
            result = zlib.inflateRawSync(data.subarray(2));
        }

        if (result.length < expectedSize) {
            console.warn(`Warning: Decompressed size (${result.length} bytes) is smaller than expected (${expectedSize} bytes).`);
        }

        return result;
    }

    /**
     * Get a specific file's data by name (exact match or name suffix).
     * @param {string} fileName - Name (or trailing part of the name) of the file.
     * @returns {Promise<Buffer|null>} The (decompressed) file data, or null if not found.
     * @throws {Error} If the entry's bounds exceed the archive.
     */
    async getFile(fileName) {
        const entry = this.entries.find(e => e.name === fileName || e.name.endsWith(fileName));

        if (!entry) {
            return null;
        }

        if (entry.offset < 0 || entry.size < 0 || entry.offset + entry.size > this.buffer.length) {
            throw new Error(`Entry '${entry.name}' exceeds file size.`);
        }

        const fileData = this.buffer.subarray(entry.offset, entry.offset + entry.size);

        if (entry.compression === CompressionType.Zlib) {
            return await this.decompressZlib(fileData, entry.originalSize);
        } else {
            return fileData;
        }
    }

    /**
     * Get a specific file's data by name synchronously (exact match or name suffix).
     * @param {string} fileName - Name (or trailing part of the name) of the file.
     * @returns {Buffer|null} The (decompressed) file data, or null if not found.
     * @throws {Error} If the entry's bounds exceed the archive.
     */
    getFileSync(fileName) {
        const entry = this.entries.find(e => e.name === fileName || e.name.endsWith(fileName));

        if (!entry) {
            return null;
        }

        if (entry.offset < 0 || entry.size < 0 || entry.offset + entry.size > this.buffer.length) {
            throw new Error(`Entry '${entry.name}' exceeds file size.`);
        }

        const fileData = this.buffer.subarray(entry.offset, entry.offset + entry.size);

        if (entry.compression === CompressionType.Zlib) {
            return this.decompressZlibSync(fileData, entry.originalSize);
        } else {
            return fileData;
        }
    }

    /**
     * Extract a single file to a specific output path.
     * @param {string} fileName - Name of the file to extract from the archive.
     * @param {string} outputPath - Full path where the file should be saved.
     * @returns {Promise<boolean>} True if successful, false if file not found.
     */
    async extractFile(fileName, outputPath) {
        const entry = this.entries.find(e => e.name === fileName || e.name.endsWith(fileName));

        if (!entry) {
            console.warn(`File not found in archive: ${fileName}`);
            return false;
        }

        try {
            // Create directory if it doesn't exist
            const destDirPath = path.dirname(outputPath);
            await fs.promises.mkdir(destDirPath, { recursive: true });

            // Get file data
            const fileData = await this.getFile(fileName);

            if (fileData) {
                await fs.promises.writeFile(outputPath, fileData);
                console.log(`Extracted: ${entry.name} -> ${outputPath}`);
                return true;
            }

            return false;
        } catch (ex) {
            console.error(`Error while extracting file (${fileName}): ${ex.message}`);
            throw ex;
        }
    }

    /**
     * Extract a single file to a specific output path synchronously.
     * @param {string} fileName - Name of the file to extract from the archive.
     * @param {string} outputPath - Full path where the file should be saved.
     * @returns {boolean} True if successful, false if file not found.
     */
    extractFileSync(fileName, outputPath) {
        const entry = this.entries.find(e => e.name === fileName || e.name.endsWith(fileName));

        if (!entry) {
            console.warn(`File not found in archive: ${fileName}`);
            return false;
        }

        try {
            // Create directory if it doesn't exist
            const destDirPath = path.dirname(outputPath);
            fs.mkdirSync(destDirPath, { recursive: true });

            // Get file data
            const fileData = this.getFileSync(fileName);

            if (fileData) {
                fs.writeFileSync(outputPath, fileData);
                console.log(`Extracted: ${entry.name} -> ${outputPath}`);
                return true;
            }

            return false;
        } catch (ex) {
            console.error(`Error while extracting file (${fileName}): ${ex.message}`);
            throw ex;
        }
    }

    /**
     * Extract files matching a pattern (e.g., "*.conf", "*.json", "models/*.xml").
     * @param {string} pattern - Glob-like pattern to match files (supports * wildcard).
     * @param {string} outputDir - Directory where files should be extracted.
     * @returns {Promise<Array<string>>} Array of extracted file paths.
     */
    async extractByPattern(pattern, outputDir) {
        const matchedFiles = this.findFilesByPattern(pattern);
        const extractedFiles = [];

        if (matchedFiles.length === 0) {
            console.warn(`No files found matching pattern: ${pattern}`);
            return extractedFiles;
        }

        console.log(`Found ${matchedFiles.length} file(s) matching pattern: ${pattern}`);

        for (const fileName of matchedFiles) {
            try {
                // Guard against path traversal via crafted entry names.
                const outputPath = safeDestinationPath(outputDir, fileName);
                if (outputPath === null) {
                    console.warn(`Warning: Entry '${fileName}' escapes the destination directory. Skipping.`);
                    continue;
                }

                const success = await this.extractFile(fileName, outputPath);

                if (success) {
                    extractedFiles.push(outputPath);
                }
            } catch (ex) {
                console.error(`Failed to extract ${fileName}: ${ex.message}`);
            }
        }

        return extractedFiles;
    }

    /**
     * Extract files matching a pattern synchronously.
     * @param {string} pattern - Glob-like pattern to match files (supports * wildcard).
     * @param {string} outputDir - Directory where files should be extracted.
     * @returns {Array<string>} Array of extracted file paths.
     */
    extractByPatternSync(pattern, outputDir) {
        const matchedFiles = this.findFilesByPattern(pattern);
        const extractedFiles = [];

        if (matchedFiles.length === 0) {
            console.warn(`No files found matching pattern: ${pattern}`);
            return extractedFiles;
        }

        console.log(`Found ${matchedFiles.length} file(s) matching pattern: ${pattern}`);

        for (const fileName of matchedFiles) {
            try {
                // Guard against path traversal via crafted entry names.
                const outputPath = safeDestinationPath(outputDir, fileName);
                if (outputPath === null) {
                    console.warn(`Warning: Entry '${fileName}' escapes the destination directory. Skipping.`);
                    continue;
                }

                const success = this.extractFileSync(fileName, outputPath);

                if (success) {
                    extractedFiles.push(outputPath);
                }
            } catch (ex) {
                console.error(`Failed to extract ${fileName}: ${ex.message}`);
            }
        }

        return extractedFiles;
    }

    /**
     * Find files matching a glob-like pattern.
     * @param {string} pattern - Pattern to match (e.g., "*.conf", "config/*.json", "**\/test.xml").
     * @returns {Array<string>} Array of matching file names.
     */
    findFilesByPattern(pattern) {
        // Convert simple glob pattern to regex. All regex metacharacters are
        // escaped first (the previous version only escaped '.', so patterns
        // containing '+', '(', '[', etc. built a wrong or invalid regex).
        const regexPattern = pattern
            .replace(/[.+^${}()|[\]\\]/g, '\\$&') // escape regex metacharacters
            .replace(/\*\*/g, '§GLOBSTAR§')       // temporarily replace **
            .replace(/\*/g, '[^/\\\\]*')          // * matches anything except path separators
            .replace(/§GLOBSTAR§/g, '.*')         // ** matches anything including path separators
            .replace(/\?/g, '[^/\\\\]');          // ? matches single character except path separator

        const regex = new RegExp(`^${regexPattern}$`, 'i'); // Case-insensitive

        return this.entries
            .map(entry => entry.name)
            .filter(name => regex.test(name.replace(/\\/g, '/'))); // Normalize path separators
    }

    /**
     * List all files in the PAK archive.
     * @returns {Array<{name: string, size: number, originalSize: number, compressed: boolean}>}
     */
    listFiles() {
        return this.entries.map(entry => ({
            name: entry.name,
            size: entry.size,
            originalSize: entry.originalSize,
            compressed: entry.compression === CompressionType.Zlib
        }));
    }
}

module.exports = { PakUnpacker, EntryType, CompressionType };
+ +## Features + +- Extract all files from PAK archives +- Extract specific files by name +- List all files in an archive +- Support for Zlib compressed files +- Async and sync APIs +- Easy integration into Express.js or other Node.js applications + +## Installation + +```bash +npm install +``` + +## Usage + +### Basic Example - Extract All Files + +```javascript +const { PakUnpacker } = require('./PakUnpacker'); + +async function extractPak() { + const unpacker = new PakUnpacker('path/to/file.pak'); + await unpacker.load(); + + console.log(`Found ${unpacker.entries.length} files`); + + await unpacker.extract('./output'); + console.log('Extraction complete!'); +} + +extractPak(); +``` + +### List Files in Archive + +```javascript +const { PakUnpacker } = require('./PakUnpacker'); + +async function listFiles() { + const unpacker = new PakUnpacker('path/to/file.pak'); + await unpacker.load(); + + const files = unpacker.listFiles(); + files.forEach(file => { + console.log(`${file.name} - ${file.size} bytes (${file.compressed ? 
'Compressed' : 'Uncompressed'})`); + }); +} + +listFiles(); +``` + +### Extract a Specific File + +```javascript +const { PakUnpacker } = require('./PakUnpacker'); + +async function extractFile() { + const unpacker = new PakUnpacker('path/to/file.pak'); + await unpacker.load(); + + const fileData = await unpacker.getFile('config.json'); + + if (fileData) { + console.log('File content:', fileData.toString('utf8')); + } else { + console.log('File not found'); + } +} + +extractFile(); +``` + +### Synchronous API + +```javascript +const { PakUnpacker } = require('./PakUnpacker'); + +function extractPakSync() { + const unpacker = new PakUnpacker('path/to/file.pak'); + unpacker.loadSync(); + + console.log(`Found ${unpacker.entries.length} files`); + + unpacker.extractSync('./output'); + console.log('Extraction complete!'); +} + +extractPakSync(); +``` + +## API Integration Example + +### Express.js REST API + +```javascript +const express = require('express'); +const { PakUnpacker } = require('./PakUnpacker'); +const app = express(); + +// List all files in a PAK archive +app.get('/api/pak/:pakName/files', async (req, res) => { + try { + const unpacker = new PakUnpacker(`./paks/${req.params.pakName}.pak`); + await unpacker.load(); + + const files = unpacker.listFiles(); + res.json({ success: true, files }); + } catch (error) { + res.status(500).json({ success: false, error: error.message }); + } +}); + +// Extract a specific file +app.get('/api/pak/:pakName/extract/:fileName(*)', async (req, res) => { + try { + const unpacker = new PakUnpacker(`./paks/${req.params.pakName}.pak`); + await unpacker.load(); + + const fileData = await unpacker.getFile(req.params.fileName); + + if (fileData) { + res.setHeader('Content-Type', 'application/octet-stream'); + res.send(fileData); + } else { + res.status(404).json({ success: false, error: 'File not found' }); + } + } catch (error) { + res.status(500).json({ success: false, error: error.message }); + } +}); + +app.listen(3000, () => 
{ + console.log('Server running on port 3000'); +}); +``` + +## PAK File Format + +Reforger PAK files have the following structure: + +1. **FORM Block**: Header with size (Big Endian Int32) +2. **HEAD Block**: 32 bytes header data +3. **DATA Block**: Size + data block +4. **Entries Block**: Directory and file entries + +### Entry Types +- **Type 0**: Directory (with child count) +- **Type 1**: File (with offset, size, original size, compression info) + +### Compression +- **None** (0x00): No compression +- **Zlib** (0x106): Deflate compression with 2-byte Zlib header + +## API Reference + +### Class: PakUnpacker + +#### Constructor +```javascript +new PakUnpacker(filePath) +``` +- `filePath` (string): Path to the .pak file + +#### Methods + +##### async load() +Load and parse the PAK file asynchronously. + +##### loadSync() +Load and parse the PAK file synchronously (blocking). + +##### async extract(dstDir) +Extract all files to the destination directory. +- `dstDir` (string): Destination directory path + +##### extractSync(dstDir) +Extract all files synchronously. +- `dstDir` (string): Destination directory path + +##### async getFile(fileName) +Get a specific file's data by name. +- `fileName` (string): Name or partial name of the file +- Returns: `Promise` + +##### getFileSync(fileName) +Get a specific file's data synchronously. +- `fileName` (string): Name or partial name of the file +- Returns: `Buffer|null` + +##### listFiles() +List all files in the PAK archive. +- Returns: Array of objects with `{name, size, originalSize, compressed}` properties + +#### Properties + +- `entries` (Array): Array of PakEntryFile objects +- `formSize` (number): Size of the FORM block +- `dataSize` (number): Size of the DATA block +- `entriesSize` (number): Size of the entries block + +## Requirements + +- Node.js >= 14.0.0 +- No external dependencies (uses built-in modules only) + +## License + +MIT + +## Credits + +Converted from the original C# implementation. 
diff --git a/data/.gitkeep b/data/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/example.js b/example.js new file mode 100644 index 0000000..781c4ab --- /dev/null +++ b/example.js @@ -0,0 +1,240 @@ +const { PakUnpacker } = require('./PakUnpacker'); +const path = require('path'); + +/** + * Example 1: Extract all files from a PAK archive + */ +async function extractAllFiles() { + const pakFile = path.join(__dirname, 'data.pak'); // Adjust path as needed + const outputDir = path.join(__dirname, '../output'); + + try { + console.log('Loading PAK file...'); + const unpacker = new PakUnpacker(pakFile); + await unpacker.load(); + + console.log(`Found ${unpacker.entries.length} files in the archive.`); + + console.log('\nExtracting files...'); + await unpacker.extract(outputDir); + + console.log('\nExtraction complete!'); + } catch (error) { + console.error('Error:', error.message); + } +} + +/** + * Example 2: List all files without extracting + */ +async function listFiles() { + const pakFile = path.join(__dirname, '../test.pak'); + + try { + const unpacker = new PakUnpacker(pakFile); + await unpacker.load(); + + const files = unpacker.listFiles(); + + console.log(`\nFiles in archive (${files.length} total):\n`); + files.forEach(file => { + const compressed = file.compressed ? 
'[Zlib]' : '[None]'; + console.log(`${compressed} ${file.name} (${file.size} -> ${file.originalSize} bytes)`); + }); + } catch (error) { + console.error('Error:', error.message); + } +} + +/** + * Example 3: Extract a specific file + */ +async function extractSingleFile() { + const pakFile = path.join(__dirname, 'data.pak'); + const fileName = 'config.json'; // Change to your target file + const outputPath = path.join(__dirname, '../output', fileName); + + try { + const unpacker = new PakUnpacker(pakFile); + await unpacker.load(); + + // Method 1: Extract file directly to disk + const success = await unpacker.extractFile(fileName, outputPath); + + if (success) { + console.log(`\nSuccessfully extracted ${fileName}`); + } else { + console.log(`File not found: ${fileName}`); + } + + // Method 2: Get file data in memory (without saving) + // const fileData = await unpacker.getFile(fileName); + // if (fileData) { + // console.log(`\nFound file: ${fileName}`); + // console.log(`Size: ${fileData.length} bytes`); + // // If it's a text file, you can print its content + // console.log('\nContent:'); + // console.log(fileData.toString('utf8')); + // } + } catch (error) { + console.error('Error:', error.message); + } +} + +/** + * Example 4: Use with Express.js API + */ +function expressApiExample() { + // This is just a code example, not executable + const express = require('express'); + const app = express(); + + app.get('/api/pak/list/:pakName', async (req, res) => { + try { + const pakFile = path.join(__dirname, '../paks', req.params.pakName + '.pak'); + const unpacker = new PakUnpacker(pakFile); + await unpacker.load(); + + const files = unpacker.listFiles(); + res.json({ success: true, files }); + } catch (error) { + res.status(500).json({ success: false, error: error.message }); + } + }); + + app.get('/api/pak/extract/:pakName/:fileName(*)', async (req, res) => { + try { + const pakFile = path.join(__dirname, '../paks', req.params.pakName + '.pak'); + const unpacker = 
new PakUnpacker(pakFile); + await unpacker.load(); + + const fileData = await unpacker.getFile(req.params.fileName); + + if (fileData) { + res.setHeader('Content-Type', 'application/octet-stream'); + res.setHeader('Content-Disposition', `attachment; filename="${path.basename(req.params.fileName)}"`); + res.send(fileData); + } else { + res.status(404).json({ success: false, error: 'File not found' }); + } + } catch (error) { + res.status(500).json({ success: false, error: error.message }); + } + }); + + app.listen(3000, () => { + console.log('API server running on port 3000'); + }); +} + +/** + * Example 5: Extract files by pattern (e.g., all *.conf files) + */ +async function extractByPattern() { + const pakFile = path.join(__dirname, 'data.pak'); + const pattern = '*.conf'; // You can use: "*.conf", "*.json", "config/*.xml", "**/*.txt" + const outputDir = path.join(__dirname, '../output'); + + try { + console.log('Loading PAK file...'); + const unpacker = new PakUnpacker(pakFile); + await unpacker.load(); + + console.log(`\nSearching for files matching: ${pattern}`); + const extractedFiles = await unpacker.extractByPattern(pattern, outputDir); + + if (extractedFiles.length > 0) { + console.log(`\nSuccessfully extracted ${extractedFiles.length} file(s):`); + extractedFiles.forEach(file => console.log(` - ${file}`)); + } else { + console.log('\nNo files matched the pattern.'); + } + } catch (error) { + console.error('Error:', error.message); + } +} + +/** + * Example 6: Synchronous version (blocking) + */ +function syncExample() { + const pakFile = path.join(__dirname, '../test.pak'); + const outputDir = path.join(__dirname, '../output-sync'); + + try { + console.log('Loading PAK file (sync)...'); + const unpacker = new PakUnpacker(pakFile); + unpacker.loadSync(); + + console.log(`Found ${unpacker.entries.length} files.`); + + console.log('\nExtracting files (sync)...'); + unpacker.extractSync(outputDir); + + console.log('\nExtraction complete!'); + } catch (error) 
{ + console.error('Error:', error.message); + } +} + +async function extractConfFiles() { + const fs = require('fs'); + const pakDir = path.join(__dirname, 'data'); // Ordner mit den PAK-Dateien + const outputDir = './output'; + const filePattern = '**/*.c'; // Pattern für die zu extrahierenden Dateien + + try { + // Alle .pak Dateien im Ordner finden + const files = fs.readdirSync(pakDir).filter(file => file.endsWith('.pak')); + + if (files.length === 0) { + console.log('Keine .pak Dateien gefunden im Ordner:', pakDir); + return; + } + + console.log(`Gefunden: ${files.length} PAK-Datei(en)\n`); + + let totalExtracted = 0; + + // Jede PAK-Datei nacheinander verarbeiten + for (const pakFileName of files) { + const pakFile = path.join(pakDir, pakFileName); + console.log(`Verarbeite: ${pakFileName}`); + + const unpacker = new PakUnpacker(pakFile); + await unpacker.load(); + + const extractedFiles = await unpacker.extractByPattern(filePattern, outputDir); + console.log(` → ${extractedFiles.length} Datei(en) extrahiert`); + + totalExtracted += extractedFiles.length; + } + + console.log(`\nGesamt: ${totalExtracted} Datei(en) aus ${files.length} PAK-Archiv(en) extrahiert`); + } catch (error) { + console.error('Fehler:', error.message); + } +} + +// Run examples +if (require.main === module) { + console.log('=== PAK Unpacker Examples ===\n'); + + // Uncomment the example you want to run: + //extractAllFiles(); + //listFiles(); + //extractSingleFile(); + + extractConfFiles(); + //syncExample(); + + console.log('Uncomment an example function to run it.'); +} + +module.exports = { + extractAllFiles, + listFiles, + extractSingleFile, + extractByPattern, + syncExample +}; diff --git a/output/.gitkeep b/output/.gitkeep new file mode 100644 index 0000000..e69de29