Re: [PATCH v4] Threaded grep

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Mon, Jan 25, 2010 at 05:20:39PM -0800, Junio C Hamano wrote:
> Fredrik Kuivinen <frekui@xxxxxxxxx> writes:
> 
> > The patch has been rebased on top of next.
> >
> > Additional changes since v3:
> >
> > * Fix some issues with Git's native pthreads implementation on
> >   Windows (pthread_cond_broadcast is still used though).
> > * Fix style issues.
> > * When grepping in a tree, allocate memory for the data buffer as
> >   late as possible.
> > * Return void from grep_sha1_async and grep_file_async instead of
> >   always returning 0.
> 
> Thanks; I've fixed up a few old-style declaration header and queued the
> result to 'pu'.

I just noticed that I forgot to take the read_sha1 lock in
grep_tree. The result is a race condition in read_sha1_file.

Here is a patch to fix this. It applies on top of ae35c68 (Threaded
grep). Could you please squash it in? Let me know if you want me to
resend the entire patch instead.

(The only important thing is that we now take the read_sha1_mutex in
grep_tree. The rest is there just to make it self-consistent.)


- Fredrik

--- 8< ---

diff --git a/builtin-grep.c b/builtin-grep.c
index 7ecf222..6cc743d 100644
--- a/builtin-grep.c
+++ b/builtin-grep.c
@@ -75,10 +75,15 @@ static int todo_done;
 static int all_work_added;
 
 /* This lock protects all the variables above. */
-static pthread_mutex_t grep_lock;
+static pthread_mutex_t grep_mutex;
 
 /* Used to serialize calls to read_sha1_file. */
-static pthread_mutex_t read_sha1_lock;
+static pthread_mutex_t read_sha1_mutex;
+
+#define grep_lock() pthread_mutex_lock(&grep_mutex)
+#define grep_unlock() pthread_mutex_unlock(&grep_mutex)
+#define read_sha1_lock() pthread_mutex_lock(&read_sha1_mutex)
+#define read_sha1_unlock() pthread_mutex_unlock(&read_sha1_mutex)
 
 /* Signalled when a new work_item is added to todo. */
 static pthread_cond_t cond_add;
@@ -93,10 +98,10 @@ static pthread_cond_t cond_result;
 
 static void add_work(enum work_type type, char *name, void *id)
 {
-	pthread_mutex_lock(&grep_lock);
+	grep_lock();
 
 	while ((todo_end+1) % ARRAY_SIZE(todo) == todo_done) {
-		pthread_cond_wait(&cond_write, &grep_lock);
+		pthread_cond_wait(&cond_write, &grep_mutex);
 	}
 
 	todo[todo_end].type = type;
@@ -107,16 +112,16 @@ static void add_work(enum work_type type, char *name, void *id)
 	todo_end = (todo_end + 1) % ARRAY_SIZE(todo);
 
 	pthread_cond_signal(&cond_add);
-	pthread_mutex_unlock(&grep_lock);
+	grep_unlock();
 }
 
 static struct work_item *get_work(void)
 {
 	struct work_item *ret;
 
-	pthread_mutex_lock(&grep_lock);
+	grep_lock();
 	while (todo_start == todo_end && !all_work_added) {
-		pthread_cond_wait(&cond_add, &grep_lock);
+		pthread_cond_wait(&cond_add, &grep_mutex);
 	}
 
 	if (todo_start == todo_end && all_work_added) {
@@ -125,7 +130,7 @@ static struct work_item *get_work(void)
 		ret = &todo[todo_start];
 		todo_start = (todo_start + 1) % ARRAY_SIZE(todo);
 	}
-	pthread_mutex_unlock(&grep_lock);
+	grep_unlock();
 	return ret;
 }
 
@@ -148,7 +153,7 @@ static void work_done(struct work_item *w)
 {
 	int old_done;
 
-	pthread_mutex_lock(&grep_lock);
+	grep_lock();
 	w->done = 1;
 	old_done = todo_done;
 	for(; todo[todo_done].done && todo_done != todo_start;
@@ -165,7 +170,7 @@ static void work_done(struct work_item *w)
 	if (all_work_added && todo_done == todo_end)
 		pthread_cond_signal(&cond_result);
 
-	pthread_mutex_unlock(&grep_lock);
+	grep_unlock();
 }
 
 static void *run(void *arg)
@@ -181,11 +186,7 @@ static void *run(void *arg)
 		opt->output_priv = w;
 		if (w->type == WORK_SHA1) {
 			unsigned long sz;
-			void* data;
-
-			pthread_mutex_lock(&read_sha1_lock);
-			data = load_sha1(w->identifier, &sz, w->name);
-			pthread_mutex_unlock(&read_sha1_lock);
+			void* data = load_sha1(w->identifier, &sz, w->name);
 
 			if (data) {
 				hit |= grep_buffer(opt, w->name, data, sz);
@@ -218,8 +219,8 @@ static void start_threads(struct grep_opt *opt)
 {
 	int i;
 
-	pthread_mutex_init(&grep_lock, NULL);
-	pthread_mutex_init(&read_sha1_lock, NULL);
+	pthread_mutex_init(&grep_mutex, NULL);
+	pthread_mutex_init(&read_sha1_mutex, NULL);
 	pthread_cond_init(&cond_add, NULL);
 	pthread_cond_init(&cond_write, NULL);
 	pthread_cond_init(&cond_result, NULL);
@@ -246,18 +247,18 @@ static int wait_all(void)
 	int hit = 0;
 	int i;
 
-	pthread_mutex_lock(&grep_lock);
+	grep_lock();
 	all_work_added = 1;
 
 	/* Wait until all work is done. */
 	while (todo_done != todo_end)
-		pthread_cond_wait(&cond_result, &grep_lock);
+		pthread_cond_wait(&cond_result, &grep_mutex);
 
 	/* Wake up all the consumer threads so they can see that there
 	 * is no more work to do.
 	 */
 	pthread_cond_broadcast(&cond_add);
-	pthread_mutex_unlock(&grep_lock);
+	grep_unlock();
 
 	for (i = 0; i < ARRAY_SIZE(threads); i++) {
 		void *h;
@@ -265,8 +266,8 @@ static int wait_all(void)
 		hit |= (int) (intptr_t) h;
 	}
 
-	pthread_mutex_destroy(&grep_lock);
-	pthread_mutex_destroy(&read_sha1_lock);
+	pthread_mutex_destroy(&grep_mutex);
+	pthread_mutex_destroy(&read_sha1_mutex);
 	pthread_cond_destroy(&cond_add);
 	pthread_cond_destroy(&cond_write);
 	pthread_cond_destroy(&cond_result);
@@ -274,6 +275,9 @@ static int wait_all(void)
 	return hit;
 }
 #else /* !NO_PTHREADS */
+#define read_sha1_lock()
+#define read_sha1_unlock()
+
 static int wait_all(void)
 {
 	return 0;
@@ -407,7 +411,11 @@ static void *load_sha1(const unsigned char *sha1, unsigned long *size,
 		       const char *name)
 {
 	enum object_type type;
-	char *data = read_sha1_file(sha1, &type, size);
+	char *data;
+
+	read_sha1_lock();
+	data = read_sha1_file(sha1, &type, size);
+	read_sha1_unlock();
 
 	if (!data)
 		error("'%s': unable to read %s", name, sha1_to_hex(sha1));
@@ -596,7 +604,10 @@ static int grep_tree(struct grep_opt *opt, const char **paths,
 			void *data;
 			unsigned long size;
 
+			read_sha1_lock();
 			data = read_sha1_file(entry.sha1, &type, &size);
+			read_sha1_unlock();
+
 			if (!data)
 				die("unable to read tree (%s)",
 				    sha1_to_hex(entry.sha1));
--
To unsubscribe from this list: send the line "unsubscribe git" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html

[Index of Archives]     [Linux Kernel Development]     [Gcc Help]     [IETF Annouce]     [DCCP]     [Netdev]     [Networking]     [Security]     [V4L]     [Bugtraq]     [Yosemite]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Linux SCSI]     [Fedora Users]