Reputation: 33
I can't seem to make partial write() work. It goes out of the memory bounds and I don't know why.
int fd = open(path, O_RDONLY);
if (fd == -1) { /* error handling */ }

const size_t read_size = 100;
size_t size = read_size;
size_t offset = 0;
size_t res = 0;
char *buff = malloc(size + 1);
int lines = 0;
int pos = 0;

while ((res = read(fd, buff + offset, read_size)) > 0)
{
    if (res == -1) { /* error handling */ }
    offset += res;
    buff[offset] = '\0';
    if (offset + read_size > size)
    {
        size *= 2;
        buff = realloc(buff, size + 1);
    }
}

for (size_t i = 0; buff[i] != '\0'; i++) // counting the lines in buff
{
    if (buff[i] == '\n')
    {
        lines++;
    }
}

size = read_size;
offset = 0;
res = 0;

if (lines < 10)
{
    while ((res = write(STDOUT_FILENO, buff + offset, read_size)) > 0)
    {
        offset += res;
    }
    buff[offset] = '\0';
}
else { /* another case where the position is found where the write() needs to start printing */ }
This is part of a tail implementation in C. There is also another function that handles stdin and does the same thing (this one handles files).
Upvotes: 0
Views: 1355
Reputation: 385657
Here is what a write loop that handles partial writes might look like:
// Returns 0 on success.
// Returns -1 and sets errno on error.
int write_full(int fd, const void *a_buf, size_t count) {
    const char *buf = a_buf;
    while ( count > 0 ) {
        ssize_t chunk_size = write(fd, buf, count);
        if ( chunk_size < 0 )
            return -1;

        buf += chunk_size;
        count -= chunk_size;
    }

    return 0;
}
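For instance, the write loop in the question could then be replaced with a single call. This is just a sketch; it assumes offset holds the number of bytes accumulated in buff, as in the question's read loop:

if ( write_full(STDOUT_FILENO, buff, offset) < 0 ) {
    perror("write");
    /* error handling */
}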
Testing is tricky. I've only been able to generate a partial write when using a non-blocking handle writing to a pipe with a blocked consumer.
But that results in the error EAGAIN or EWOULDBLOCK, so if we temporarily add code to immediately try again (which would be bad to do in practice), we can see the partial writes working.
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

// Returns 0 on success.
// Returns -1 and sets errno on error.
int write_full(int fd, const void *a_buf, size_t count) {
    const char *buf = a_buf;
    while ( count > 0 ) {
        ssize_t chunk_size = write(fd, buf, count);
        if ( chunk_size < 0 && ( errno == EAGAIN || errno == EWOULDBLOCK ) )
            continue;  // DEBUG: busy-wait on a full pipe; bad in practice.
        if ( chunk_size < 0 )
            return -1;

        fprintf(stderr, "Wrote %zd\n", chunk_size);  // DEBUG
        buf += chunk_size;
        count -= chunk_size;
    }

    return 0;
}

int main(void) {
    int fd = STDOUT_FILENO;
    fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);  // Make non-blocking

    const size_t n = 100000;
    char *buf = malloc(n);
    if (!buf) {
        perror("Can't allocate memory");
        exit(1);
    }

    for (size_t i = n; i--; )
        buf[i] = 'x';

    if ( write_full(fd, buf, n) < 0 ) {
        perror("Write error");
        exit(1);
    }

    free(buf);
    return 0;
}
$ gcc -Wall -Wextra -pedantic a.c -o a && ./a | perl -e'print while <>' >/dev/null
Wrote 65536
Wrote 8192
Wrote 8192
Wrote 16384
Wrote 1696
Perl takes longer to load than the C program, allowing the 64 KiB pipe buffer to fill up. You can ensure this by adding sleep 2; to the start of the Perl program.
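For example, the delayed consumer might be invoked like this (the same one-liner as above, just with the sleep added):

$ gcc -Wall -Wextra -pedantic a.c -o a && ./a | perl -e'sleep 2; print while <>' >/dev/null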
Perl reads in 8 KiB chunks, and it takes longer to do so than it takes for the C program to write, so the C program is constantly running out of space in the pipe buffer.
Upvotes: 1