Reputation: 35
The parent process distributes the work equally among children. Essentially, the user will input
number of integers the data will have and store that into count. Then the user will input count integers, and the last input will be N, the number of children the parent will have to send the data to. I'll eventually use the children to do a merge sort with the data, but for now I was able to separate the array into approximately equal chunks using printfs. After feeling good about it, I switched the printf to my write() call on the pipes so the data would be sent to the children. It didn't work; I'm not sure what's going on — I've tried lots of different things. Any help would be great! Thanks
Here is example output of my code:
Input count of integers, data, then N Children: 10
1 2 3 4 5 6 7 8 9 10 3
1 2 3 4 5 6 7 8 9 10
I'm the parent --> 1111797
I'm child # 2 , with pid: 1112221
I'm child # 1 , with pid: 1112220
I'm child # 3 , with pid: 1112222
process 1111797 is sending data
1 2 3 4 5 6 7 8 9 10 child 1 recieved data
Child PID 1112220 terminated with return status 0
Normally, for the code below, I'd use a for loop iterating int l while l is less than N (the number of children) to send the data via the pipe for each child in the multi-dimensional pipe array. But in this case I just did l++, because the code already takes the children into account when it separates the data into N (number of children) separate chunks. Code in question:
// seperates file into chunks
// Each iteration deals one window [start, end) of the array to child l;
// the first `bonus` (= count % N) windows are one element wider, so the
// remainder is spread evenly across the leading children.
int l = 0;
for (start = 0, end = chunk_size; start < count; start = end, end = start + chunk_size)
{
if (bonus)
{
// this child absorbs one of the leftover elements
end++;
bonus--;
}
// l is the child process to send the pipe to
for (int o = start; o < end; o++)
{
// ship one int at a time down child l's pipe
if (write(fd[l][WRITE], &ar[o], sizeof(int)) < 0)
{
perror("writing to pipe");
exit(1);
}
}
l++;
}
Main Code:
#include <pthread.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdbool.h>
#define READ 0
#define WRITE 1
// total number of integers read from stdin (also read by child_work)
int count;
// number of child processes the parent forks, one pipe each
int N = 0;
pid_t pid;
bool running = true;
// child-side pipe consumer: closes the write end, reads its chunk, echoes it
int child_work(int child_num, int fd[], int arr[]);
/*
 * SIGUSR1 handler: announces that the parent is about to distribute data.
 *
 * BUG FIX: printf() is not async-signal-safe (see signal-safety(7) /
 * POSIX 2.4.3); calling it inside a handler can deadlock or corrupt the
 * stdio state it interrupted.  This version formats the message by hand
 * and emits it with write(), which is async-signal-safe.
 */
void sigHandler(int number)
{
    (void)number; /* unused, as in the original */
    char msg[64];
    char digits[16];
    int nd = 0;
    pid_t p = getpid();
    size_t len = 0;
    static const char prefix[] = "process ";
    static const char suffix[] = " is sending data\n";

    /* convert the pid to decimal (pids are non-negative) */
    do
    {
        digits[nd++] = (char)('0' + (p % 10));
        p /= 10;
    } while (p > 0 && nd < (int)sizeof(digits));

    memcpy(msg, prefix, sizeof(prefix) - 1);
    len = sizeof(prefix) - 1;
    while (nd > 0)
        msg[len++] = digits[--nd];
    memcpy(msg + len, suffix, sizeof(suffix) - 1);
    len += sizeof(suffix) - 1;

    /* best effort: nothing sensible to do if write fails in a handler */
    (void)write(STDOUT_FILENO, msg, len);
}
int main()
{
signal(SIGUSR1, sigHandler);
// The parent process is required to maintain input descriptors (the upstream pipes) and
// output descriptors (the downstream pipes).
int i = 0;
int *ar;
printf("Input count of integers, data, then N Children: ");
scanf("%d", &count);
ar = malloc(count * sizeof(int)); // alocates memory based on stdin input size on runtime
if (ar == NULL)
{
fprintf(stderr, "memory allocation failed.\n");
return -1;
}
for (i = 0; i < count; i++)
{
scanf("%d", &ar[i]);
if (ar[i] > 103 || ar[i] < -10)
{
fprintf(stderr, "integer out of scope");
return -1;
}
}
// scan in the numbe of children
scanf("%d", &N);
printf("\n");
int chunk_size = (count / N);
int bonus = count - (chunk_size * N);
int start;
int end;
// multi-dimensional array to initalize pipes for all children
int fd[N][2];
// prints initial array;
for (i = 0; i < count; i++)
{
printf("%d ", ar[i]);
}
printf("\n\n");
printf("I'm the parent --> %d\n", getpid()); // prints parent id.
for (int fk = 0; fk < N; fk++)
{
// parent
// pipe call
if (pipe(fd[fk]) < 0)
{
perror("plumbing problem");
exit(1);
}
if ((pid = fork()) < 0)
{
perror("fork");
exit(1);
}
// child
if (pid == 0)
{
signal(SIGUSR1, SIG_IGN);
// sleep(4);
printf("I'm child # %d , with pid: %d\n", fk + 1, getpid());
// testing with sleep calls
sleep(1);
child_work(fk, fd[fk], ar);
free(ar);
exit(0);
}
// parent
// printf("parent 1st stage, %d\n", getpid());
}
// After the loop--parent code
// waits for USR1 Signal
pause();
//***signal handler triggered** send data to child via Pipe
// loop to close READ file descriptor for j child
for (int j = 0; j < N; j++)
{
if (close(fd[j][READ]) < 0)
{
perror("closing fd[READ]");
exit(1);
}
}
// loop to WRITE file descriptor for fk child
/* for (int l = 0; l < N; l++)
{
for (int q = 0; q < count; q++)
{
if (write(fd[l][WRITE], &ar[q], sizeof(int)) < 0)
{
perror("writing to pipe");
exit(1);
}
}
}
*/
// seperates file into chunks
int l = 0;
for (start = 0, end = chunk_size; start < count; start = end, end = start + chunk_size)
{
if (bonus)
{
end++;
bonus--;
}
// l is the child process to send the pipe to
for (int o = start; o < end; o++)
{
if (write(fd[l][WRITE], &ar[o], sizeof(int)) < 0)
{
perror("writing to pipe");
exit(1);
}
}
l++;
}
/*
for (int l = 0; l < N; l++)
{
for (int q = 0; q < count; q++)
{
if (write(fd[l][WRITE], &ar[q], sizeof(int)) < 0)
{
perror("writing to pipe");
exit(1);
}
}
} */
int wstatus;
for (int wt = 0; wt < N; wt++)
{
pid = wait(&wstatus);
printf("Child PID %ld terminated with return status %d \n", (long)pid, WEXITSTATUS(wstatus));
}
// printf("parent 3rd stage");
free(ar);
}
/*
 * Child side of one pipe: closes the unused write end, reads integers into
 * arr[] until the parent closes its write end (EOF) or `count` integers
 * have arrived, echoing each one, then announces completion.
 *
 * BUG FIX: the original looped exactly `count` times even though the parent
 * only writes this child's chunk (roughly count/N ints), so the surplus
 * read() calls blocked forever once the pipe drained.  Stopping at EOF
 * makes the child consume exactly what it was sent.
 */
int child_work(int child_num, int fd[], int arr[])
{
    if (close(fd[WRITE]) < 0)
    {
        perror("closing fd[WRITE]");
        exit(1);
    }
    int got = 0;
    while (got < count)
    {
        ssize_t r = read(fd[READ], &arr[got], sizeof(int));
        if (r < 0)
        {
            perror("reading from pipe");
            exit(1);
        }
        if (r == 0) // EOF: parent closed its write end, chunk complete
            break;
        printf("%d ", arr[got]);
        got++;
    }
    if (close(fd[READ]) < 0)
    {
        perror("closing fd[READ]");
        exit(1);
    }
    sleep(1);
    printf("child %d recieved data\n", child_num + 1);
    return 0;
}
Upvotes: 0
Views: 63
Reputation: 12698
IMHO, you are approaching it the wrong way. You are trying to feed equal-sized chunks of information to each child, when you need to provide each one a chunk of numbers that are already in order (this is how merge sort works). So you first have to separate your input into chunks and give each child (or each of the N you have) a full chunk of numbers — a chunk being a subset of numbers already in order, so e.g. 1, 2, 5, 3, 7, 4 has three chunks: {1, 2, 5}, {3, 7} and {4}. You do this and then ask your children to return the numbers they have, selecting the lowest number of the set offered by each child; this merges N chunks into one, and the process terminates when you have only a single chunk (e.g. you give chunks to only one child, because there's only one chunk).
By the way, the approach above doesn't require multiple threads, as the card dealer only works to give the cards to each child, and the children's task is just to store the data and return it to the dealer in bigger chunks. It would be better to make a pipeline of processes, each performing one iteration: each process deals the cards into arrays, then merges the chunks and passes them to the next process in the pipeline. In that case you have all iterations working in parallel — as soon as one starts outputting cards to the next process, the next process starts dealing them into blocks and merging (you can indeed have one process dealing the cards and a second merging them and forwarding to the next step). You create new processes as soon as you detect a new chunk (if you create as many processes as you have chunks, you can finish in just one iteration).
Upvotes: 1