This library allows you to read large amounts of data in chunks.
```bash
npm install --save async-chunk-reader
```
Changelog:

- v1.0.6: added reading from a zip file (see the zip examples below)
- v1.0.8: added `skipRows` (see the last example below)
- v1.0.9: to be added in the future
`init(parameters: InitParameters)`

input: `InitParameters`

- `chunkSize` : String
- `inputFile` : String | Stream
- `encoding` : String
- `selectedFileName` : String

`get()`

output: Async Iterator
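The examples below also pass two options that are not in the list above, `specialChunkSize` and `skipRows`. For reference, here is every option that appears somewhere in this README in one object; the concrete values and inline descriptions are inferred from the examples, not from official documentation, and not all of these options necessarily belong in a single call:

```javascript
// Every init() option used in this README, gathered in one place for reference.
// Values are illustrative; descriptions are inferred from the examples below.
const exampleOptions = {
  chunkSize: 100000,                        // records per chunk (presumably rows)
  inputFile: 'example/Archive.zip',         // file path, raw string content, or readable stream
  encoding: 'utf8',                         // documented above but not shown in any example; value assumed
  selectedFileName: 'avocado.csv',          // which file to read when inputFile is a zip archive
  specialChunkSize: { 0: 10000, 1: 40000 }, // per-chunk sizes keyed by chunk index
  skipRows: 40000                           // number of leading rows to skip (added in v1.0.8)
}
```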
Require the library (CommonJS):

```javascript
const reader = require('async-chunk-reader')
```

or import it (ES modules):

```javascript
import * as reader from "async-chunk-reader"
```
With a file path (here a gzipped CSV):

```javascript
async function main() {
  const data = await reader
    .init({
      chunkSize: 100000,
      inputFile: 'input/mobile_network_201805.csv.gz'
    })
    .get()

  for await (let chunk of data) {
    console.log(chunk.map(d => d.value))
  }
}

main();
```
With a stream (here standard input):

```javascript
async function main() {
  const data = await reader
    .init({
      inputFile: process.stdin
    })
    .get()

  for await (let chunk of data) {
    console.log(chunk.map(d => d.value))
  }
}

main();
```
With a string:

```javascript
async function main() {
  const data = await reader
    .init({
      inputFile: "Some string"
    })
    .get()

  for await (let chunk of data) {
    console.log(chunk.map(d => d.value))
  }
}

main();
```
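The raw-string input above is easier to follow with concrete content. A minimal sketch, assuming the string is treated just like file content (the CSV text here is made up purely for illustration):

```javascript
const reader = require('async-chunk-reader')

async function main() {
  // Raw text passed directly as the input; this content is illustrative only.
  const csvText = 'id,value\n1,foo\n2,bar\n3,baz\n'

  const data = await reader
    .init({
      inputFile: csvText
    })
    .get()

  for await (let chunk of data) {
    console.log(chunk.map(d => d.value))
  }
}

main();
```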
With a zip archive, selecting a file inside it:

```javascript
async function main() {
  const data = await reader
    .init({
      chunkSize: 100000,
      inputFile: 'example/Archive.zip',
      selectedFileName: 'avocado.csv' // file inside the zip
    })
    .get()

  for await (let chunk of data) {
    console.log(chunk.map(d => d.value))
  }
}

main();
```
With `specialChunkSize` (per-chunk sizes keyed by chunk index):

```javascript
async function main() {
  const data = await reader
    .init({
      chunkSize: 100000,
      inputFile: 'example/Archive.zip',
      specialChunkSize: { 0: 10000, 1: 40000 }
    })
    .get()

  for await (let chunk of data) {
    console.log(chunk.map(d => d.value))
  }
}

main();
```
With `skipRows`, skipping the given number of leading rows:

```javascript
async function main() {
  const data = await reader
    .init({
      chunkSize: 100000,
      inputFile: 'example/Archive.zip',
      skipRows: 40000
    })
    .get()

  for await (let chunk of data) {
    console.log(chunk.map(d => d.value))
  }
}

main();
```
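`encoding` is the one documented option that none of the examples above use. A minimal sketch, assuming the input is a latin1-encoded CSV (the file name and encoding value are assumptions, not library defaults):

```javascript
const reader = require('async-chunk-reader')

async function main() {
  const data = await reader
    .init({
      chunkSize: 100000,
      inputFile: 'input/latin1_data.csv', // hypothetical file
      encoding: 'latin1'                  // documented option; the value here is an assumption
    })
    .get()

  for await (let chunk of data) {
    console.log(chunk.map(d => d.value))
  }
}

main();
```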