--- a/apps/jobsub/src/jobsub/views.py
+++ b/apps/jobsub/src/jobsub/views.py
@@ -69,19 +69,20 @@ def _list_designs(request, owner, name, order_by='-last_modified'):
   for doc in data[:MAX_DESIGNS]:
     design = doc.content_object
 
-    ko_design = {
-      'id': design.id,
-      'owner': design.owner.username,
-      # Design name is validated by workflow and node forms.
-      'name': design.name,
-      'description': design.description,
-      'node_type': design.start.get_child('to').node_type,
-      'last_modified': py_time.mktime(design.last_modified.timetuple()),
-      'editable': design.owner.id == request.user.id,
-      'is_shared': design.is_shared,
-      'is_trashed': doc.is_trashed()
-    }
-    designs.append(ko_design)
+    if design is not None:
+      ko_design = {
+       'id': design.id,
+       'owner': design.owner.username,
+       # Design name is validated by workflow and node forms.
+       'name': design.name,
+       'description': design.description,
+       'node_type': design.start.get_child('to').node_type,
+       'last_modified': py_time.mktime(design.last_modified.timetuple()),
+       'editable': design.owner.id == request.user.id,
+       'is_shared': design.is_shared,
+       'is_trashed': doc.is_trashed()
+      }
+      designs.append(ko_design)
 
   return designs
 
--- a/core/src/main/java/org/apache/cxf/message/ExchangeImpl.java
+++ b/core/src/main/java/org/apache/cxf/message/ExchangeImpl.java
@@ -19,6 +19,8 @@
 
 package org.apache.cxf.message;
 
+import java.util.concurrent.ConcurrentHashMap;
+
 import org.apache.cxf.Bus;
 import org.apache.cxf.binding.Binding;
 import org.apache.cxf.endpoint.ConduitSelector;
@@ -30,7 +32,7 @@
 import org.apache.cxf.transport.Destination;
 import org.apache.cxf.transport.Session;
 
-public class ExchangeImpl extends StringMapImpl implements Exchange {
+public class ExchangeImpl extends ConcurrentHashMap<String, Object>  implements Exchange {
     
     private static final long serialVersionUID = -3112077559217623594L;
     private Destination destination;
@@ -69,24 +71,6 @@ public ExchangeImpl(ExchangeImpl ex) {
         this.bindingOp = ex.bindingOp;
     }
 
-    /*
-    public <T> T get(Class<T> key) { 
-        if (key == Bus.class) {
-            return (T)bus;
-        } else if (key == Service.class) {
-            return (T)service;
-        } else if (key == Endpoint.class) {
-            return (T)endpoint;
-        } else if (key == BindingOperationInfo.class) {
-            return (T)bindingOp;
-        } else if (key == Binding.class) {
-            return (T)binding;
-        } else if (key == OperationInfo.class) {
-            return super.get(key);
-        }
-        return super.get(key);
-    }
-    */
     private void resetContextCaches() {
         if (inMessage != null) {
             inMessage.resetContextCache();
@@ -101,9 +85,17 @@ private void resetContextCaches() {
             outFaultMessage.resetContextCache();
         }
     }
+    
+    public <T> T get(Class<T> key) {
+        return key.cast(get(key.getName()));
+    }
 
     public <T> void put(Class<T> key, T value) {
-        super.put(key, value);
+        if (value == null) {
+            super.remove(key);
+        } else {
+            super.put(key.getName(), value);
+        }
         if (key == Bus.class) {
             resetContextCaches();
             bus = (Bus)value;
@@ -119,6 +111,7 @@ private void resetContextCaches() {
             binding = (Binding)value;
         }
     }
+    
     public Object put(String key, Object value) {
         if (inMessage != null) {
             inMessage.setContextualProperty(key, value);
@@ -132,6 +125,9 @@ public Object put(String key, Object value) {
         if (outFaultMessage != null) {
             outFaultMessage.setContextualProperty(key, value);
         }
+        if (value == null) {
+            return super.remove(key);
+        }
         return super.put(key, value);
     }
 
--- a/spec/core_ext_spec.rb
+++ b/spec/core_ext_spec.rb
@@ -445,24 +445,24 @@ def test2; :test2; end
 
 describe Enumerable do
 
-  # it "maps deeply" do
-  #   [["a\n", "b\n"], ["c\n", "d\n"]].map_recursively(&:strip).should == [ %w[a b], %w[c d] ]
+  it "maps deeply" do
+    [["a\n", "b\n"], ["c\n", "d\n"]].map_recursively(&:strip).should == [ %w[a b], %w[c d] ]
     
-  #   [[1,2],[3,4]].deep_map {|e| e ** 2}.should == [[1,4],[9,16]] 
-  #   [1,2,3,4].deep_map {|e| e ** 2}.should == [1,4,9,16] 
-  #   [[],[],1,2,3,4].deep_map {|e| e ** 2}.should == [[], [], 1, 4, 9, 16] 
+    [[1,2],[3,4]].deep_map {|e| e ** 2}.should == [[1,4],[9,16]] 
+    [1,2,3,4].deep_map {|e| e ** 2}.should == [1,4,9,16] 
+    [[],[],1,2,3,4].deep_map {|e| e ** 2}.should == [[], [], 1, 4, 9, 16] 
 
-  #   {1=>2, 3=>{4=>5, 6=>7}}.deep_map {|k,v| [k, v**2] }.should == [ [1,4], [3, [[4,25], [6,49]]] ]
-  # end
+    {1=>2, 3=>{4=>5, 6=>7}}.deep_map {|k,v| [k, v**2] }.should == [ [1,4], [3, [[4,25], [6,49]]] ]
+  end
   
-  # it "selects deeply" do
-  #   [[1,2],[3,4]].deep_select {|e| e % 2 == 0 }.should == [[2],[4]]
-  #   puts
+  it "selects deeply" do
+    [[1,2],[3,4]].deep_select {|e| e % 2 == 0 }.should == [[2],[4]]
+    puts
 
-  #   {1=>2, 3=>{4=>5, 6=>7}}.deep_select {|k,v| k == 1 }.should == {1=>2} 
-  #   #[1,2,3,4].deep_select {|e| e ** 2}.should == [1,4,9,16] 
-  #   #[[],[],1,2,3,4].deep_select {|e| e ** 2}.should == [[], [], 1, 4, 9, 16] 
-  # end
+    {1=>2, 3=>{4=>5, 6=>7}}.deep_select {|k,v| k == 1 }.should == {1=>2} 
+    #[1,2,3,4].deep_select {|e| e ** 2}.should == [1,4,9,16] 
+    #[[],[],1,2,3,4].deep_select {|e| e ** 2}.should == [[], [], 1, 4, 9, 16] 
+  end
   
   it "splits" do
     [1,2,3,4,5].split_at     {|e| e == 3}.should == [ [1,2], [4,5] ]
--- a/src/bench.c
+++ b/src/bench.c
@@ -112,6 +112,14 @@ static void bench_install_handler(void)
 #endif
 }
 
+/* Mutes ASAN problems. We pass a buffer long enough for any use */
+#define fmt_set_key(key, index)	  \
+	{ \
+		static char buf_key[PLAINTEXT_BUFFER_SIZE]; \
+		strncpy(buf_key, key, sizeof(buf_key)); \
+		format->methods.set_key(buf_key, index); \
+	}
+
 static void bench_set_keys(struct fmt_main *format,
 	struct fmt_tests *current, int cond)
 {
@@ -136,8 +144,7 @@ static void bench_set_keys(struct fmt_main *format,
 			} else
 				break;
 		} while (1);
-
-		format->methods.set_key(plaintext, index);
+		fmt_set_key(plaintext, index);
 	}
 }
 
--- a/librosa/core.py
+++ b/librosa/core.py
@@ -11,7 +11,6 @@
 import numpy.fft as fft
 import scipy.signal
 import scipy.ndimage
-from builtins import range
 
 from . import filters
 from . import feature
@@ -709,7 +708,7 @@ def __variable_hop_response(my_y, target_hop):
 
         if zoom_factor > 0:
             # We need to aggregate.  Generate the boundary frames
-            bounds = list(range(0, my_cqt.shape[1], 2**(zoom_factor)))
+            bounds = list(np.arange(0, my_cqt.shape[1], 2**(zoom_factor)))
             my_cqt = feature.sync(my_cqt, bounds,
                                   aggregate=aggregate)
 
--- a/inc/functions-post-statuses.php
+++ b/inc/functions-post-statuses.php
@@ -23,6 +23,9 @@
 /* Transition post status. */
 add_action( 'transition_post_status', 'mb_transition_post_status', 10, 3 );
 
+/* Permanently-deleted post. */
+add_action( 'before_delete_post', 'mb_before_delete_post' );
+
 /**
  * Returns the slug for the "publish" post status.  Used by replies by default.  Note that this status 
  * is not registered by default because it's a default WordPress post status.
@@ -570,3 +573,88 @@ function mb_restore_post_status( $post_id ) {
 function mb_update_post_status( $post_id, $status ) {
 	return wp_update_post( array( 'ID' => $post_id, 'post_status' => $status ) );
 }
+
+/**
+ * Callback function on the `before_delete_post` hook for when a post is deleted. This sets up some 
+ * specific actions based on our post types. It also saves the deleted post object for later use in 
+ * those actions.
+ *
+ * @since  1.0.0
+ * @access public
+ * @param  int     $post_id
+ * @return void
+ */
+function mb_before_delete_post( $post_id ) {
+
+	$forum_type = mb_get_forum_post_type();
+	$topic_type = mb_get_topic_post_type();
+	$reply_type = mb_get_reply_post_type();
+	$post_type  = get_post_type( $post_id );
+
+	/* WP doesn't pass the post object after a post has been deleted, so we need to save it temporarily. */
+	if ( in_array( $post_type, array( $forum_type, $topic_type, $reply_type ) ) )
+		message_board()->deleted_post = get_post( $post_id );
+
+	/* If a forum is being deleted. */
+	if ( $forum_type === $post_type )
+		add_action( 'after_delete_post', 'mb_after_delete_forum' );
+
+	/* If a topic is being deleted. */
+	elseif ( $topic_type === $post_type )
+		add_action( 'after_delete_post', 'mb_after_delete_topic' );
+
+	/* If a reply is being deleted. */
+	elseif ( $reply_type === $post_type )
+		add_action( 'after_delete_post', 'mb_after_delete_reply' );
+}
+
+/**
+ * Callback function on the `after_delete_post` hook for when a forum is deleted.
+ *
+ * @todo All forum topics need to become orphans at this point. Attempt to move topics into parent if avail.
+ * @todo Reset counts for parent forums.
+ * @todo `wp_die()` if this is the default forum.
+ *
+ * @since  1.0.0
+ * @access public
+ * @param  int     $post_id
+ * @return void
+ */
+function mb_after_delete_forum( $post_id ) {}
+
+/**
+ * Callback function on the `after_delete_post` hook for when a topic is deleted.
+ *
+ * @todo All topic replies need to become orphans at this point.
+ * @todo Remove from sticky arrays.
+ *
+ * @since  1.0.0
+ * @access public
+ * @param  int     $post_id
+ * @return void
+ */
+function mb_after_delete_topic( $post_id ) {
+	$mb = message_board();
+
+	if ( is_object( $mb->deleted_post ) && $mb->deleted_post->ID === $post_id ) {
+		mb_reset_topic_data( $mb->deleted_post );
+		$mb->deleted_post = null;
+	}
+}
+
+/**
+ * Callback function on the `after_delete_post` hook for when a reply is deleted.
+ *
+ * @since  1.0.0
+ * @access public
+ * @param  int     $post_id
+ * @return void
+ */
+function mb_after_delete_reply( $post_id ) {
+	$mb = message_board();
+
+	if ( is_object( $mb->deleted_post ) && $mb->deleted_post->ID === $post_id ) {
+		mb_reset_reply_data( $mb->deleted_post );
+		$mb->deleted_post = null;
+	}
+}
--- a/plex/Client/PlexServer.cpp
+++ b/plex/Client/PlexServer.cpp
@@ -235,7 +235,9 @@ bool CPlexServer::UpdateReachability()
   BOOST_FOREACH(CPlexConnectionPtr conn, sortedConnections)
   {
     CLog::Log(LOGDEBUG, "CPlexServer::UpdateReachability testing connection %s", conn->toString().c_str());
-    if (g_plexApplication.myPlexManager->GetCurrentUserInfo().restricted && conn->GetAccessToken().IsEmpty())
+    if (g_plexApplication.myPlexManager &&
+        g_plexApplication.myPlexManager->GetCurrentUserInfo().restricted &&
+        conn->GetAccessToken().IsEmpty())
     {
       CLog::Log(LOGINFO, "CPlexServer::UpdateReachability skipping connection %s since we are restricted", conn->toString().c_str());
       m_connectionsLeft --;
index 0000000..60b4fb8
--- /dev/null
+++ b/core/client/controllers/modals/delete-tag.js
@@ -0,0 +1,34 @@
+var DeleteTagController = Ember.Controller.extend({
+    actions: {
+        confirmAccept: function () {
+            var tag = this.get('model'),
+                name = tag.get('name'),
+                self = this;
+
+            this.send('closeSettingsMenu');
+
+            tag.destroyRecord().then(function () {
+                self.notifications.showSuccess('Deleted ' + name);
+            }).catch(function (error) {
+                self.notifications.showAPIError(error);
+            });
+        },
+
+        confirmReject: function () {
+            return false;
+        }
+    },
+
+    confirm: {
+        accept: {
+            text: 'Delete',
+            buttonClass: 'btn btn-red'
+        },
+        reject: {
+            text: 'Cancel',
+            buttonClass: 'btn btn-default btn-minor'
+        }
+    }
+});
+
+export default DeleteTagController;
--- a/src/LiveDevelopment/LiveDevelopment.js
+++ b/src/LiveDevelopment/LiveDevelopment.js
@@ -1052,8 +1052,7 @@ define(function LiveDevelopment(require, exports, module) {
 
         // Domains for some agents must be enabled first before loading
         var enablePromise = Inspector.Page.enable().then(function () {
-            var domEnablePromise = Inspector.DOM.enable ? Inspector.DOM.enable() : (new $.Deferred()).resolve().promise();
-            domEnablePromise.then(_enableAgents);
+            return Inspector.DOM.enable().then(_enableAgents, _enableAgents);
         });
         
         enablePromise.done(function () {
--- a/src/test/unit/specific_feature_tests/images.js
+++ b/src/test/unit/specific_feature_tests/images.js
@@ -5,6 +5,7 @@
     test,
     expect,
     equal,
+    IMG_SRC,
     deepEqual
 */
 "use strict";
@@ -17,12 +18,12 @@ test("Inserts image into a paragraph", function () {
         setCaretInSelector: 'p',
         manipulationFunc: function (wymeditor) {
             wymeditor.insertImage({
-                src: "http://example.com/example.jpg",
+                src: IMG_SRC,
                 alt: "Example"
             });
         },
         expectedResultHtml: "<p><img alt=\"Example\" " +
-            "src=\"http://example.com/example.jpg\" />Foo</p>",
+            "src=\"" + IMG_SRC + "\" />Foo</p>",
         testUndoRedo: true
     });
 });
@@ -36,11 +37,11 @@ test("Inserts image into the body", function () {
         manipulationFunc: function (wymeditor) {
             wymeditor.insertImage({
                 alt: "Example",
-                src: "http://example.com/example.jpg"
+                src: IMG_SRC
             });
         },
         expectedResultHtml: "<img alt=\"Example\" " +
-            "src=\"http://example.com/example.jpg\" /><br />",
+            "src=\"" + IMG_SRC + "\" /><br />",
         testUndoRedo: true
     });
 });
@@ -49,7 +50,7 @@ test("._selectedImage is saved on mousedown", function () {
     var initHtml = [""
         , '<p id="noimage">Images? We dont need no silly images</p>'
         , '<p>'
-            , '<img id="google" src="http://example.com/example.jpg" />'
+            , '<img id="google" src="' + IMG_SRC + '" />'
         , '</p>'
         ].join(''),
         wymeditor = jQuery.wymeditors(0),
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -697,7 +697,7 @@ class GCRuntime
      */
     bool grayBitsValid;
 
-    mozilla::Atomic<uintptr_t> majorGCRequested;
+    volatile uintptr_t majorGCRequested;
     JS::gcreason::Reason majorGCTriggerReason;
 
     bool minorGCRequested;
@@ -792,7 +792,7 @@ class GCRuntime
      * frame, rather than at the beginning. In this case, the next slice will be
      * delayed so that we don't get back-to-back slices.
      */
-    mozilla::Atomic<uintptr_t> interFrameGC;
+    volatile uintptr_t interFrameGC;
 
     /* Default budget for incremental GC slice. See SliceBudget in jsgc.h. */
     int64_t sliceBudget;
@@ -836,7 +836,7 @@ class GCRuntime
 
     bool poked;
 
-    mozilla::Atomic<js::HeapState> heapState;
+    volatile js::HeapState heapState;
 
     /*
      * ForkJoin workers enter and leave GC independently; this counter
--- a/incubator/CronJobs/l10n/nl_NL.php
+++ b/incubator/CronJobs/l10n/nl_NL.php
@@ -17,7 +17,6 @@
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
  *
- * @translator Laurent Declercq (nuxwin) <l.declercq@nuxwin.com>
  * @translator Djawi de Boer (Novy) <djawi@djawi.nl>
  * @translator & review Thom Heemstra (theemstra) <thom@heemstra.us>
  */
@@ -119,7 +118,7 @@
 	"Value for the '%s' field cannot be empty." => "Waarde voor het veld '%s' mag niet leeg zijn.",
 	"Invalid value for the '%s' field." => "Ongeldige waarde voor het veld '%s'.",
 	'Unable to parse time entry.' => 'Kan tijdinvoer niet verwerken.',
-	"You're exceeding the allowed limit of %s minutes, which is the minimum interval time between each cron job execution." => "U overschrijdt de toegestane %s minuten, wat de minimale tijdsinterval tussen elke uitvoering van een cron-job is."
+	"You're exceeding the allowed limit of %s minutes, which is the minimum interval time between each cron job execution." => "U overschrijdt de toegestane %s minuten, wat de minimale tijdsinterval tussen elke uitvoering van een cron-job is.",
 	'User must be a valid UNIX user.' => 'Gebruiker dient een geldige UNIX-gebruiker te zijn.',
 	'Url must not contain any username/password for security reasons.' => 'URL mag wegens veiligheidsoverwegingen geen gebruikersnamen en/of wachtwoorden bevatten.',
 	'Command must be a valid HTTP URL.' => 'Commando dient een geldige HTTP-URL te zijn.',
--- a/spec/redis-lock_spec.rb
+++ b/spec/redis-lock_spec.rb
@@ -143,6 +143,12 @@
 end
 
 describe Redis::Lock, '#expired' do
+  context "when there are no expired locks" do
+    it "returns an empty array" do
+      Redis::Lock.expired.should be_empty
+    end
+  end
+
   context "when there are expired locks and unexpired locks" do
     let(:expired)   { Redis::Lock.new('1', { :expire => 0.01, :key_group => key_group }) }
     let(:unexpired) { Redis::Lock.new('2', { :expire => 100,  :key_group => key_group }) }
@@ -161,5 +167,12 @@
     it "only returns locks for the current key_group" do
       Redis::Lock.expired(:key_group => 'xxx').should be_empty
     end
+
+    it "removes the key when locking then unlocking an expired lock" do
+      lock = Redis::Lock.expired(:key_group => key_group).first
+      lock.lock; lock.unlock
+
+      Redis::Lock.expired(:key_group => key_group).should be_empty
+    end
   end
 end
--- a/tests/sandbox/playbq.c
+++ b/tests/sandbox/playbq.c
@@ -281,27 +281,12 @@ int main(int argc, char **argv)
     }
 
     // verify the file format
-    switch (sfinfo.channels) {
-    case 1:
-    case 2:
-        break;
-    default:
+    if (sfinfo.channels < 1 || sfinfo.channels > 12) {
         fprintf(stderr, "unsupported channel count %d\n", sfinfo.channels);
         goto close_sndfile;
     }
 
-    switch (sfinfo.samplerate) {
-    case  8000:
-    case 11025:
-    case 12000:
-    case 16000:
-    case 22050:
-    case 24000:
-    case 32000:
-    case 44100:
-    case 48000:
-        break;
-    default:
+    if (sfinfo.samplerate < 8000 || sfinfo.samplerate > 192000) {
         fprintf(stderr, "unsupported sample rate %d\n", sfinfo.samplerate);
         goto close_sndfile;
     }
@@ -321,8 +306,10 @@ int main(int argc, char **argv)
     case SF_FORMAT_PCM_U8:
         break;
     default:
-        fprintf(stderr, "unsupported sub-format 0x%x\n", sfinfo.format & SF_FORMAT_SUBMASK);
-        goto close_sndfile;
+        if (sfinfo.format != SF_FORMAT_WAV) {
+            fprintf(stderr, "unsupported sub-format 0x%x (0x%x)\n", sfinfo.format & SF_FORMAT_SUBMASK, sfinfo.format);
+            goto close_sndfile;
+        }
     }
 
     SLuint32 bitsPerSample;
@@ -399,8 +386,33 @@ int main(int argc, char **argv)
     format_pcm.sampleRate = sfinfo.samplerate * 1000;
     format_pcm.bitsPerSample = bitsPerSample;
     format_pcm.containerSize = format_pcm.bitsPerSample;
-    format_pcm.channelMask = 1 == format_pcm.numChannels ? SL_SPEAKER_FRONT_CENTER :
-            SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
+
+    switch (format_pcm.numChannels) {
+        case 1:
+            format_pcm.channelMask = SL_SPEAKER_FRONT_CENTER;
+            break;
+        case 2:
+            format_pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
+            break;
+        case 4:
+            format_pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT
+                                   | SL_SPEAKER_BACK_LEFT | SL_SPEAKER_BACK_RIGHT;
+            break;
+        case 6:
+            format_pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT
+                                   | SL_SPEAKER_FRONT_CENTER  | SL_SPEAKER_LOW_FREQUENCY
+                                   | SL_SPEAKER_BACK_LEFT | SL_SPEAKER_BACK_RIGHT;
+            break;
+        case 8:
+            format_pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT
+                                   | SL_SPEAKER_FRONT_CENTER  | SL_SPEAKER_LOW_FREQUENCY
+                                   | SL_SPEAKER_BACK_LEFT | SL_SPEAKER_BACK_RIGHT
+                                   | SL_SPEAKER_SIDE_LEFT | SL_SPEAKER_SIDE_RIGHT;
+            break;
+        default:
+            format_pcm.channelMask = 0;
+    }
+
     format_pcm.endianness = byteOrder;
     format_pcm.representation = transferFormat == AUDIO_FORMAT_PCM_FLOAT
             ? SL_ANDROID_PCM_REPRESENTATION_FLOAT : transferFormat == AUDIO_FORMAT_PCM_8_BIT
--- a/audio_utils/tinysndfile.c
+++ b/audio_utils/tinysndfile.c
@@ -164,7 +164,7 @@ static SNDFILE *sf_open_read(const char *path, SF_INFO *info)
             // ignore byte rate
             // ignore block alignment
             unsigned bitsPerSample = little2u(&fmt[14]);
-            if (bitsPerSample != 8 && bitsPerSample != 16 && bitsPerSample != 32) {
+            if (bitsPerSample != 8 && bitsPerSample != 16 && bitsPerSample != 24 && bitsPerSample != 32) {
                 fprintf(stderr, "bitsPerSample %u != 8 or 16 or 32\n", bitsPerSample);
                 goto close;
             }
--- a/cloud/amazon/ec2_vol.py
+++ b/cloud/amazon/ec2_vol.py
@@ -48,6 +48,14 @@
     required: false
     default: null
     aliases: []
+  volume_type:
+    description:
+      - Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS). "Standard" is the old EBS default
+        and continues to remain the Ansible default for backwards compatibility. 
+    required: false
+    default: standard
+    aliases: []
+    version_added: "1.8"
   iops:
     description:
       - the provisioned IOPs you want to associate with this volume (integer).
@@ -164,6 +172,14 @@
 - ec2_vol:
     instance: i-XXXXXX
     state: list
+    
+# Create new volume using SSD storage
+- local_action: 
+    module: ec2_vol 
+    instance: XXXXXX 
+    volume_size: 50 
+    volume_type: gp2
+    device_name: /dev/xvdf
 '''
 
 import sys
@@ -239,12 +255,11 @@ def create_volume(module, ec2, zone):
     iops = module.params.get('iops')
     encrypted = module.params.get('encrypted')
     volume_size = module.params.get('volume_size')
+    volume_type = module.params.get('volume_type')
     snapshot = module.params.get('snapshot')
     # If custom iops is defined we use volume_type "io1" rather than the default of "standard"
     if iops:
         volume_type = 'io1'
-    else:
-        volume_type = 'standard'
 
     # If no instance supplied, try volume creation based on module parameters.
     if name or id:
@@ -324,6 +339,7 @@ def main():
             id = dict(),
             name = dict(),
             volume_size = dict(),
+            volume_type = dict(choices=['standard', 'gp2', 'io1'], default='standard'),
             iops = dict(),
             encrypted = dict(),
             device_name = dict(),
@@ -338,6 +354,7 @@ def main():
     name = module.params.get('name')
     instance = module.params.get('instance')
     volume_size = module.params.get('volume_size')
+    volume_type = module.params.get('volume_type')
     iops = module.params.get('iops')
     encrypted = module.params.get('encrypted')
     device_name = module.params.get('device_name')
@@ -411,7 +428,7 @@ def main():
         volume = create_volume(module, ec2, zone)
         if instance:
             attach_volume(module, ec2, volume, inst)
-        module.exit_json(volume_id=volume.id, device=device_name)
+        module.exit_json(volume_id=volume.id, device=device_name, volume_type=volume.type)
 
 # import module snippets
 from ansible.module_utils.basic import *
--- a/opac/opac-search.pl
+++ b/opac/opac-search.pl
@@ -420,10 +420,11 @@ BEGIN
 
 # limits are use to limit to results to a pre-defined category such as branch or language
 my @limits = $cgi->param('limit');
+@limits = map { uri_unescape($_) } @limits;
 my @nolimits = $cgi->param('nolimit');
+@nolimits = map { uri_unescape($_) } @nolimits;
 my %is_nolimit = map { $_ => 1 } @nolimits;
 @limits = grep { not $is_nolimit{$_} } @limits;
-@limits = map { uri_unescape($_) } @limits;
 
 if($params->{'multibranchlimit'}) {
     my $multibranch = '('.join( " or ", map { "branch: $_ " } @{ GetBranchesInCategory( $params->{'multibranchlimit'} ) } ).')';
--- a/libjoy.h
+++ b/libjoy.h
@@ -134,6 +134,7 @@ struct _JoyStickClass {
 	guint button_pressed;
 	guint button_released;
 	guint axis_moved;
+	guint disconnected;
 };
 
 /* constructors & class functions */
--- a/src/androidTest/java/com/couchbase/lite/replicator/ReplicationTest.java
+++ b/src/androidTest/java/com/couchbase/lite/replicator/ReplicationTest.java
@@ -3497,7 +3497,7 @@ public void testContinuousPushReplicationGoesIdleTooSoon() throws Exception{
         replication.addChangeListener(idleObserver);
         replication.start();
 
-        // 3. Wait until idle
+        // 3. Wait until idle (make sure replicator becomes IDLE state)
         boolean success = replicationIdle.await(30, TimeUnit.SECONDS);
         assertTrue(success);
         replication.removeChangeListener(idleObserver);
@@ -3505,22 +3505,28 @@ public void testContinuousPushReplicationGoesIdleTooSoon() throws Exception{
         // replicator should be idle here
 
         // 4. Add replication change listener for transition to IDLE
-        final List<Boolean> flag = new ArrayList<Boolean>();
-        flag.add(false);
-        Replication.ChangeListener listener = new Replication.ChangeListener(){
+        class ReplicationTransitionToIdleObserver implements  Replication.ChangeListener{
+            private CountDownLatch doneSignal;
+            public ReplicationTransitionToIdleObserver(CountDownLatch doneSignal) {
+                this.doneSignal = doneSignal;
+            }
             public void changed(Replication.ChangeEvent event) {
                 Log.w(Log.TAG_SYNC, "[ChangeListener.changed()] event => " + event.toString());
                 if(event.getTransition()!=null){
                     if(event.getTransition().getDestination() == ReplicationState.IDLE){
                         Log.w(Log.TAG_SYNC, "Transition to  IDLE");
                         Log.w(Log.TAG_SYNC, "Request Count => " + server.getRequestCount());
+                        doneSignal.countDown();
+
+                        // When replicator becomes IDLE state, check if all requests are completed
                         assertEquals(EXPECTED_REQUEST_COUNT, server.getRequestCount());
-                        flag.set(0, true);
                     }
                 }
             }
-        };
-        replication.addChangeListener(listener);
+        }
+        CountDownLatch replicationTransitionToIdle = new CountDownLatch(1);
+        ReplicationTransitionToIdleObserver replicationTransitionToIdleObserver = new ReplicationTransitionToIdleObserver(replicationTransitionToIdle);
+        replication.addChangeListener(replicationTransitionToIdleObserver);
         Log.w(Log.TAG_SYNC, "Added listener for transition to IDLE");
 
         // 5. Add doc(s)
@@ -3530,6 +3536,8 @@ public void changed(Replication.ChangeEvent event) {
             final Document doc = createDocWithProperties(properties1);
         }
 
+        // 6. Make sure if 4 requests are called.
+
         // _local
         RecordedRequest request1 = dispatcher.takeRequestBlocking(MockHelper.PATH_REGEX_CHECKPOINT);
         assertNotNull(request1);
@@ -3554,8 +3562,12 @@ public void changed(Replication.ChangeEvent event) {
         Log.w(Log.TAG_SYNC, "Total Requested Count => " + server.getRequestCount());
         assertEquals(EXPECTED_REQUEST_COUNT, server.getRequestCount());
 
-        // check if state change to idle from something else
-        assertEquals(Boolean.TRUE, flag.get(0));
+        // 7. Wait until idle (make sure replicator becomes IDLE state from other state)
+        // NOTE: 12/17/2014 - current code fails here because after adding listener, state never changed from IDLE
+        //       By implementing stateMachine for Replication completely, address this failure.
+        success = replicationTransitionToIdle.await(30, TimeUnit.SECONDS);
+        assertTrue(success);
+        replication.removeChangeListener(replicationTransitionToIdleObserver);
 
         stopReplication(replication);
         server.shutdown();