21 September 2015

Dates with REST services

No time, just a date. When I need a date (e.g. birth date), I would like to avoid dealing with timezones at all, because it doesn't matter what the time zone is on a date, as long as it displays the same date everywhere. I don't want the person in CST to see the date as different from the person in GMT. But the reality is that neither .NET nor Javascript have just a Date object without time and therefore time zone. So I want to use UTC midnight at all levels so that there is no chance of conversions causing a change in the way the date is saved or displayed.


Angular / Javascript


As near as I can tell, there is no way to make a Javascript Date object that is in UTC (unless the computer's local time is UTC). JS Dates are always in local time regardless of how they were created. So the first thing I need to do is give up on the Javascript Date object. It's worthless. The only way to make sure you are transmitting UTC to the server is to keep the value in ISO format string.

So as a result, I made an angular directive to accept/validate a formatted date and save it to the model in ISO format UTC. It also works with form validation. For now it only does US date format, but you are welcome to change it. If you do, I would put the format on the directive's attribute (e.g. date-format="M/d/yyyy"). No guarantees that this code has most efficient means of doing things, but it works for me.

(function () {
    // Register the date-format directive on the app module.
    angular
        .module('myModule')
        .directive('dateFormat', dateFormat);
    dateFormat.$inject = []; // no injected dependencies (minification-safe annotation)
    // ISO 8601 UTC date at midnight, e.g. "2015-09-21T00:00:00Z".
    // Captures: (year)(month)(day). The time portion only admits zeros,
    // colons, and dots, so any non-midnight time fails to match.
    var isoUtcDateFmt = /^([0-9]{4})-([0-9]{2})-([0-9]{2})T[0\:\.]*Z?$/;
    // Loose US-style input date: M/D/YYYY with any single non-alphanumeric
    // character as separator. Captures: (month)(day)(year).
    var inputDateFmt = /^(\d{1,2})[^0-9A-Za-z](\d{1,2})[^0-9A-Za-z](\d{4,})$/;
    var dayFn = {
        1: 31,
        // Gregorian rules
        // 29 days if divisible by 4 but not 100 unless divisible by 400
        2: function (y) { return y % 4 === 0 && (% 100 !== 0 || y % 400 === 0) ? 29 : 28; },
        3: 31,
        4: 30,
        5: 31,
        6: 30,
        7: 31,
        8: 31,
        9: 30,
        10: 31,
        11: 30,
        12: 31
    };
    function loadUtc(iso) {
        if (iso === undefined || iso === null || !isoUtcDateFmt.test(iso))
            return '';
        var month, day, year;
        iso.replace(isoUtcDateFmt, function (match, y, m, d) {
            month = m * 1;
            day = d * 1;
            year = y * 1;
            return '';
        });
        return '{0}/{1}/{2}'.format(month, day, year);
    }
    function leftPad(char, len, value) {
        var s = ('' + value);
        while (s.length < len)
            s = char + s;
        return s;
    }
    function saveUtc(ymd) {
        if (ymd === null)
            return null;
        var y = leftPad('0', 4, ymd.year), m = leftPad('0', 2, ymd.month), d = leftPad('0', 2, ymd.day);
        return '{0}-{1}-{2}T00:00:00Z'.format(y, m, d);
    }
    function inputToYmd(input) {
        if (input === null || input === undefined || !inputDateFmt.test(input))
            return null;
        var month, day, year;
        input.replace(inputDateFmt, function (match, m, d, y) {
            month = m * 1;
            day = d * 1;
            year = y * 1;
            return '';
        });
        return { year: year, month: month, day: day };
    }
    function validate(ymd, maxAge, minAge) {
        if (ymd === null)
            return [null];
        var year = ymd.year, month = ymd.month, day = ymd.day;
        var errors = [];
        var maxDays = 31;
        // basic checks
        var monthValid = 1 <= month && month <= 12;
        var dayValid = false;
        // calculate max days in month
        if (monthValid) {
            var maxDaysFn = dayFn[month];
            maxDays = angular.isNumber(maxDaysFn)
                ? maxDaysFn
                : (angular.isFunction(maxDaysFn)
                    ? maxDaysFn(year)
                    : maxDays);
        }
        dayValid = 1 <= day && day <= maxDays;
        if (!monthValid)
            errors.push('Month must be 1 to 12');
        if (!dayValid)
            errors.push('Day must be 1 to {0}'.format(maxDays));
        // min/max range checking
        if (errors.length === 0) {
            var now = new Date();
            var d = new Date(now.getFullYear(), now.getMonth(), now.getDate());
            var todayTime = d.getTime();
            var todayYears = d.getFullYear();
            var minTime = d.setFullYear(todayYears - maxAge);
            var maxTime = d.setFullYear(todayYears - minAge);
            var testTime = new Date(year, month - 1, day).getTime();
            var dateValidMin = minTime <= testTime;
            var dateValidMax = testTime <= maxTime;
            if (!dateValidMin)
                errors.push('Max age is {0} years old'.format(maxAge));
            if (!dateValidMax)
                errors.push('Minimum age is {0} years old'.format(minAge));
        }
        return errors;
    }
    function dateFormat() {
        var me = this;
        return {
            scope: { dateErrors: '=' },
            require: 'ngModel',
            link: function (scope, element, attrs, ctrl) {
                var maxAge = attrs.maxAge || 1000;
                var minAge = attrs.minAge || -1000;
                //View -> Model
                ctrl.$parsers.push(function (data) {
                    if (data !== undefined && data !== null && data !== '') {
                        var ymd = inputToYmd(data);
                        var errors = validate(ymd, maxAge, minAge);
                        // set validity if possible
                        if (attrs.name)
                            ctrl.$setValidity(attrs.name, errors.length === 0);
                        // set errors if possible
                        if (scope.dateErrors)
                            scope.dateErrors = errors;
                        // send ISO date string to model
                        return saveUtc(ymd);
                    }
                    else {
                        // set errors if possible
                        if (scope.dateErrors)
                            scope.dateErrors = [];
                        return null;
                    }
                });
                //Model -> View
                ctrl.$formatters.push(function (_) {
                    var data = ctrl.$modelValue;
                    if (data !== undefined && data !== null && data !== '') {
                        var inputText = loadUtc(ctrl.$modelValue); // load from ISO date string
                        var ymd = inputToYmd(inputText);
                        var errors = validate(ymd, maxAge, minAge);
                        // set validity if possible
                        if (attrs.name)
                            ctrl.$setValidity(attrs.name, errors.length === 0);
                        // set errors if possible
                        if (scope.dateErrors)
                            scope.dateErrors = errors;
                        // send input to view
                        return inputText;
                    }
                    else {
                        // set errors if possible
                        if (scope.dateErrors)
                            scope.dateErrors = [];
                        return '';
                    }
                });
            }
        };
    }
})();

The input looks something like this:

    <input name="dob" type="text"
        required date-format max-age="110" min-age="14" date-errors="ctrl.dobErrors"
        ng-model="ctrl.dob" />
    <div ng-repeat="obj in ctrl.dobErrors track by $id(obj)">{{obj}}</div>

.NET


So when I get to the server side, I am using DateTimeOffset. The problem is that when I go to reserialize this date to JSON, the standard serializer (Newtonsoft.Json) plays dumb, only serializing to local time. Even changing the serializer settings to DateTimeZoneHandling.Utc doesn't fix it. Considering it was deserialized from UTC, the right thing would be to reserialize to UTC. JSON.NET just doesn't do the right thing here. But there is an included converter that will do the right thing with some nudging. That link is found here.


06 August 2015

Idea: Use HTML as Build Configuration

Over the past week or so, I've been trying to split out a front-end SPA application from the web services which host it. In the process, I've adopted some modern front-end tools like NPM, Bower, and Gulp. I've got it building debug and release versions (bundled and minified) of my AngularJS app.

Upon reflection on this experience, I feel that the build process using tools like Grunt and Gulp is a step backwards. I have nothing against those projects, but I feel that the process could be a lot simpler and not require learning extensive build apis/plugins/configs. (I have nothing against learning new things... in fact I love to! But the end result has to be worth it.)

My idea is that the build configuration is the source code... specifically, it is the index.html page. If you take a hard look at index.html, you realize that it is already a configuration document for your app. It glues the parts of the application together (css, scripts, images). Here is an example index.html using angular that could be run without a build step.


<!DOCTYPE html>
<html ng-app>
<head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />

    <title>My App</title>

    <link rel="shortcut icon" href="assets/favicon.ico" />
    <!-- css -->
    <!-- framework -->
    <link href="../bower_components/font-awesome/css/font-awesome.min.css" rel="stylesheet" />
    <link href="../bower_components/bootswatch/lumen/bootstrap.min.css" rel="stylesheet" />
    <!-- app -->
    <link href="app.css" rel="stylesheet" />
</head>
<body>
    <!-- content -->
    <ui-view></ui-view>

    <!-- js -->
    <!-- framework -->
    <script src="../bower_components/angular/angular.min.js"></script>
    <script src="../bower_components/angular-ui-router/release/angular-ui-router.min.js"></script>
    <script src="../bower_components/angular-bootstrap/ui-bootstrap-tpls.min.js"></script>
    <!-- app -->
    <script src="app.module.js"></script>
    <script src="app.config.js"></script>
    <script src="app.routes.js"></script>
</body>
</html>


So my idea is that the build process reads this file and uses it as a configuration. For a developer build, the app could be copied as-is (since it is valid, run-able html as-is). For release, my imaginary web compiler would bundle and minify referenced items. CSS urls will also have to be resolved for things like external font assets. Then the contents would be included inline in the output index.html file. It's like compiling the application to a single executable in the desktop world. That result might look something like this (whitespace included for readability):


<!DOCTYPE html>
<html ng-app>
<head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />

    <title>My App</title>

    <link rel="shortcut icon" href="assets/favicon.ico" />
    <style>/* all css here */</style>
</head>
<body>
    <!-- content -->
    <ui-view></ui-view>

    <script>/* all js here */</script>
</body>
</html>




xkcd


One issue with "compiling" this is that angular (and likely other frameworks) partial page templates are wired up in javascript code, not from HTML. But there is a convenient way to specify your templates in HTML using a $templateCache feature in angular. Just include them as script tags.


    <!-- templates -->
    <script type="text/ng-template" src="module/template.html"></script>


This is a pretty powerful convention. During dev, this has the effect of preloading the template into $templateCache. Then when your module requests this URI, it is loaded from cache instead of issuing a new GET request. During build, this would tell my imaginary web compiler where to locate the template so its contents can be included inline.

The dream here is that the process of developing the web application also defines the build process for free. I believe this is eminently possible because the main html page already serves as a configuration for the application. Some conventions may be needed so that run-able HTML can serve as build configuration. One convention might be compiling on save for TypeScript, Coffee, LESS, SASS, etc (since browsers can't process these uncompiled... yet). Another could be pre-loading templates.

Sometimes these conventions would not be desirable. And I'm sure there are other issues that could be mentioned -- like the annoyance of having to manually add references to index.html. But in the end, I think this could get you 90% of the way towards a working build setup, and a combination of IDE tooling (to auto-add references, among other things) and other build tools can be applied to get the remaining 10%. As it stands now, we are using the complex build tools to solve 100% of a problem which is 90% easy.

05 August 2015

EU cookie law is stupid

This post was going to be about something else entirely. But as I went to create a new post, I received notification that Blogger was inserting a notification on my blog (a.k.a. annoying the 3 people that read my blog) for compliance with the EU cookie law. Needless to say, this made me angry. Let me give you four reasons why this is stupid.

Nobody reads the notifications

The notifications are intended to notify you of what information of yours is kept in cookies. But history, UX research, and common sense have shown that users click through messages without reading them. They just want to read their content, and they don't care about the notice. Even if they tried to read the details, they are written by/for lawyers. So ultimately, the notifications are annoying and stupid. I can't wait until I find a plugin that blocks them. Currently, I just block them manually whenever I see one on a site.

The law doesn't protect you

The law purportedly gives the users a choice. But, are you really going to refuse FaceBook's cookies and thus not be able to use their website? I didn't think so. You will take their cookies and like it because you want to use their website. It doesn't require sites to stop using the cookies in undesirable ways. The site gets to have its way with your cookies, and if you don't like it your option is to hit the road.

No law can protect you

A serial killer obviously knows murder is illegal, but it still doesn't stop them. The law only serves as an after-the-fact counter-balance to the problem. Likewise, illicit sites don't care about the law anyway. They aren't going to care about displaying a notice. (If they did, it would probably just be a trick to get you to click to install malware so they could outright steal your personal information.) These laws only cause a burden on legitimate site operators and site users (whose browsing experience is interrupted by an asinine notice). Even legit websites can decide one day that they want to abuse your information (use it legally but unethically). Popular example: a megacorp buys an established community website so they can trade its credibility for short-term profit.

You are the only one who can protect you

Ultimately, you are the only one with the power to protect yourself. Will you wield that power? There are a myriad of plugins which block intrusions into your privacy. Start with an ad blocker, because the abuse targeted (and missed completely) by this law -- abusing your information for profit -- is the foundation of current internet ads. After the ad blocker, check out Disconnect.me, Ghostery, and so forth. (Hint: google privacy browser plugins)

Government Protip: You know nothing about the internet. Stop making laws for it.
Citizen Protip: You know everything about the internet. Stop allowing stupid laws to be made for it.

08 July 2015

Messaging with REST

One of the interesting bits of guidance you commonly get for REST services is the use of HTTP verbs to represent the user's intent. For instance, the URI "/customers/1" represents the customer with ID 1. Then HTTP verbs would map to CRUD actions. POST for Create (possibly on "/customers" without the ID), GET for Read, PUT for Update, and DELETE for Delete. So HTTP methods against that location would perform the appropriate action.

Exercise: Try naming your use cases using any nouns, adjectives, etc... but only use verbs that are valid HTTP methods.

But this is a very data-centric approach. What happens when you want to "undelete"? Or "merge" two items? Or deactivate all accounts with no activity for a year? Custom HTTP verbs aren't well supported. And the Uniform Interface principle doesn't want you making custom verbs anyway, because external clients and commodity software don't know how to use your custom verbs (does UNDELETE allow data to be sent like POST? or is it more like DELETE?). All that to say that HTTP verbs are not sufficient to express use cases, so URIs must often do that.

One way to still fit messaging into REST constraints is to look at the messages themselves as the resources, each with their own URI, rather than the aggregates (or whatever modeling paradigm you use) being resources. Likely, the only thing you want to do is create new messages for the server to process. Conveniently, this maps to a common/well-supported HTTP verb: POST. You can process it while the client waits or return a location header with a URL that the client can use the check on the status of a message. I suppose if you needed, you could incorporate other verbs for changing (PUT) or cancelling (DELETE) an in-flight message. Then you are back to a more standard REST definition. However, most systems don't care as much about managing their messages as they do about getting work done (by processing the messages as fast as possible). For my current project, the latter features are completely unnecessary so far.

If not trying to couch messaging-over-HTTP in terms of the REST buzzword, I think I prefer to look at the URI not as a resource, but as the address (aka route) of the handler for this particular message.


19 June 2015

Idea: Messaging over WebSocket

WebSockets seems to be an ideal protocol for messaging systems. However there are scarcely any specs that take advantage of it. Some of the ones I found are SwaggerSocket and WAMP (Web Application Messaging Protocol, not to be confused with Windows, Apache, MySQL, and PHP). But these don't quite hit the target I'm going for. WAMP, for instance, concentrates on RPC and pub/sub. SwaggerSocket goes a step too far and implements REST over WebSocket. So, I began to wonder what it would take to make an effective messaging system over WebSockets.

The minimum things (I can think of) needed to process a message on the server are: A way to route the message, and a way to deserialize the message payload. An added wrinkle is that web sockets are fully asynchronous, unlike HTTP which is typically request/response with only I/O being asynchronous (on the client anyway).

The perfect candidate to specify routing is a URI path (e.g. /system/boundedcontext/aggregate/message). And my first thought for a "wire" format for the message payload was JSON. So it naturally follows that an HTTP message would fit as a wire format for messaging over a web socket, since it has path and payload. Using HTTP messages, you could also specify different message formats with headers.

However with messaging, one feature we really don't need from HTTP is verb usage. The route (aka message name) is semantic and, if following CQRS patterns, is itself already a verb which conveys intent. Likely, a lot of the other parts of the HTTP spec are also unnecessary for messaging. A default subset could be used so it works with existing HTTP parsers. For instance, always using POST or PUT so a payload can be included. For HTTP verb militants, either verb could be argued: creating new messages to process or sending message payloads which update some other resource.

Aside
Another pattern that some people use is posting messages to return view data. That would typically be the domain of a regular HTTP GET. However, a GET can be insufficient when performing a query with complex parameters, since GETs can't have a message payload attached, and complex query parameters are troublesome. This (messaging for views) is also handy when you need to coordinate resources across different read models / services before returning a coherent view. Such a pattern would also fit nicely into messaging over web socket, but is optional.

How to implement? On the server side, there are already numerous web server implementations which parse and handle HTTP requests. One of these could be leveraged. The next step is to register a route with a handler so the message can be delivered. Deserialization can also be part of the pipeline if the deserialization format (e.g. class) is registered along with the route. The handlers themselves could be agents or actors or whatever computational model suits your fancy. One of the things I'm interested in based on reading about the Orleans framework is the virtual actor approach.

This approach could already be used on top of regular HTTP without websockets, so all the components are already out there. They just need to be assembled in such a way to easily take advantage of websocket performance, and mitigate the difficulty of full asynchrony. Maybe in my spare time... hah!

18 June 2015

Stupid F# Interop Things

I really like F#, but interop with C# libraries can be really annoying. One such example I ran across today was trying to create a value provider for JSON.NET. As part of it, you have to call a protected method on the base object. Since this base method has to be called on a list of things, it has to be called from a lambda. That's where things fall down.

Here's an example from SO on that. It also has links to other questions for more details on the "why".

http://stackoverflow.com/questions/12112377/understanding-a-change-to-protected-base-member-usage-in-f-3-0

The easiest way to make this work is to create a private method on your derived class which calls the protected method. Then in your lambda, you can call your own class's private method without issue.

It's irritating and dumb to have to deal with this. Hopefully the F# team will add compiler optimizations to do this for us one day. After all the solution is straight-forward, and you always run into this in a derived class where the base class has protected members.

17 June 2015

The Power of f# Type Providers

Now call me daft, but when I first encountered information on F# Type Providers, I didn't get the big deal. I was looking at JSON parsing, and I ultimately ended up using JSON.NET again, because I didn't get it.

But later, I had reason to need to access a SQL database to import some data into my project. Then I actually tried the SQL Type Provider. In fact, I had such trouble comprehending them because it didn't enter into my mind that their capability could even exist. Having felt the pain of data access layers and object-relational mappers, I shelved the idea that it could be easy.

They automatically and dynamically generate types for your data source. Those types have IntelliSense support in Visual Studio. Basically all you have to do is provide a connection string (for SQL) or example data (for JSON and others).

For those familiar with Entity Framework, it's like that except without having to create and maintain an EDMX model. You can also query the database with "query" computation expressions... similar syntax to LINQ.

For schema-less data types, you just provide example data, and the type provider infers the types from that.

15 April 2015

Nancy with F#, some helpers for routes

Nancy is such a great framework for making HTTP services on the .NET platform, especially if you are using C#. Nancy depends on a lot of the C# trappings like dynamic variables, inheritance, and so forth. However, it's quite unpalatable when used from F#. There isn't a dynamic type in F#. The common alternative is defining my own ? operator to access members of parameters. Also defining routes is just plain weird from F#, and only gets weirder when you try to do asynchronous routes. The translation from an async C# lambda is not a smooth one. There's also having to box everything because F# will not do automatic upcasting to System.Object.

I think this is why it seems like there are a good handful of Nancy frameworks for F#. Most of them do more than just provide more sugary syntax for defining routes, however, which is all I really wanted for the time being.

So here are my helper functions for making routes, especially async routes, a bit more palatable from F#.

    /// Register a synchronous handler for `path` on the given route builder.
    /// `f` receives the route parameters downcast to DynamicDictionary; its
    /// result is boxed because Nancy expects an obj return value.
    let private addRouteSync path f (router:NancyModule.RouteBuilder) =
        router.[path] <-
            fun (parameters:obj) ->
                f (parameters :?> DynamicDictionary) |> box

    // async is the default
    /// Register an asynchronous handler for `path`. `f` receives the route
    /// parameters (downcast to DynamicDictionary) and a CancellationToken and
    /// returns an Async<_>; the result is boxed and the whole computation is
    /// converted to the Task that Nancy's async route API expects.
    let private addRoute path f (router:NancyModule.RouteBuilder) =
        router.[path, true] <-
            fun (parameters:obj) cancellationToken ->
                async { // unwrap and box the result
                    let! result = f (parameters :?> DynamicDictionary) cancellationToken
                    return box result
                } |> Async.StartAsTask

    // more f# friendly methods
    /// Extension members registering a handler per HTTP verb. The async
    /// variants are unsuffixed (async is the default); Sync variants wrap
    /// plain functions. The module itself is passed to the handler factory
    /// `f` so handlers defined in plain F# modules can still reach it.
    type NancyModule with
        member me.post path f = me.Post |> addRoute path (f me)
        member me.get path f = me.Get |> addRoute path (f me)
        member me.put path f = me.Put |> addRoute path (f me)
        member me.delete path f = me.Delete |> addRoute path (f me)
        member me.patch path f = me.Patch |> addRoute path (f me)
        member me.options path f = me.Options |> addRoute path (f me)

        member me.postSync path f = me.Post |> addRouteSync path (f me)
        member me.getSync path f = me.Get |> addRouteSync path (f me)
        member me.putSync path f = me.Put |> addRouteSync path (f me)
        member me.deleteSync path f = me.Delete |> addRouteSync path (f me)
        member me.patchSync path f = me.Patch |> addRouteSync path (f me)
        member me.optionsSync path f = me.Options |> addRouteSync path (f me)

Here is a dumb example of usage, asynchronous:

        m.put "/orders/{id:string}"
            <| fun nmodule parameters cancelToken ->
                let id = parameters.["id"].ToString()
                async { return id } // echo id

Now the function you setup receives the parameters as a DynamicDictionary instead of obj. You can also return whatever you want (e.g. Response), and these helpers will box it for you before providing it back to Nancy. Your function can also directly return an async, and these helpers will convert it to a task type which Nancy expects. I'm also passing in the NancyModule in case your code hangs off an F# module (essentially static code) instead of the Nancy module class itself.

I basically only use the NancyModule as an entry point (like a static void main) and try to remain functionally styled with my real code.

14 April 2015

Making functions thread safe with agents

The F# MailboxProcessor class is pretty awesome. It allows you to safely run code that would ordinarily be vulnerable to concurrency problems, such as those that use a non-thread-safe collection (like my previously defined CircularDictionary) without worrying about locking.

However, it's a bit of a mess of boilerplate to setup. It also doesn't preserve the nice functional semantics of F#, since you have to use an object. It's also a bit wonky to call, what with the reply channel and all.

But I wouldn't mention it without also having a solution. Here is a little module I threw together that takes a presumably non-thread-safe function, wraps it in an agent to make it thread safe, and returns an identical function which will route the call through the agent.

module Agent =
    /// Wrap the given asynchronous 1 parameter function
    /// in a MailboxProcessor and provide a method to call it.
    /// The returned function has the same call shape as `fAsync`, but every
    /// invocation is serialized through the agent's mailbox, so a
    /// non-thread-safe function becomes safe to call concurrently.
    let wrap fAsync =
        // create and start a new mailbox processor
        let agent =
            MailboxProcessor.Start
            <| fun inbox -> // inbox is the same as the agent itself
                let rec loop () = async { // define the async message processing loop
                    let! (message, reply) = inbox.Receive() // wait on next message
                    let! result = fAsync message // run async fn
                    reply(result) // reply
                    return! loop () // continue, tail call recursion
                }
                loop () // start the message processing loop
        // create a fn that appropriately posts messages to agent
        // (callers get an Async result; the reply channel stays hidden)
        let fn x =
            agent.PostAndAsyncReply(fun chan -> (x, chan.Reply))
        fn // return fn

    /// Wrap the given asynchronous 2 parameter function
    /// in a MailboxProcessor and provide a method to call it.
    /// Implemented by tupling the two arguments through the 1-parameter wrap.
    let wrap2 fAsync2 =
        // wrap two params into 1 tuple
        let fn1 = wrap <| fun (a,b) -> fAsync2 a b
        // convert 2 args to 1 tuple
        fun a b -> fn1 (a,b)

I am defaulting to calling functions that return asynchronous results. That can easily be changed by changing the let! to a let (without the bang). This doesn't really provide a way to stop the agent, but in my case, I don't care if messages are lost on shutdown. It's not hard to make a version that is stoppable if you care about that.

Here's how I'm calling it:

module IdGenerator =

    let Create (esConnection:IEventStoreConnection) idHistorySize =

        // changes to history are not thread safe
        let idHistory = new CircularDictionary<TrackingId, CreatedId>(idHistorySize)

        let generate trackingId prefix =
            ...

        // wrap generate function in an agent, thread-safe for great justice
        Agent.wrap2 generate

Oh yes, and since agents are asynchronous, the result of the wrapped function is asynchronous. At worst, you can call Async.RunSynchronously to wait on message to finish and get the result.

Some links on F# Async:
http://fsharpforfunandprofit.com/posts/concurrency-async-and-parallel/
https://msdn.microsoft.com/en-us/library/dd233250.aspx

12 April 2015

Weak Serialization

When I started implementing JSON-based messaging for the first time, my first step was to make a monolithic object for each use case.

// Monolithic command contract: message metadata (MessageId, TargetId,
// Version) and use-case-specific data live side by side in one class.
public class UseCase1 // command
{
    public Guid MessageId {get; set;}
    public string TargetId {get; set;}
    public int Version {get; set;}
    public string UseCaseData1 {get; set;}
    public int UseCaseData2 {get; set;}
}

This is actually not a bad way to do it. However, I eventually became annoyed at having the same boilerplate metadata (MessageId, TargetId, Version, for instance) in every message. In grand “I love compiled objects” fashion, I decided it was possible to encapsulate any given message into one segmented contract. Something like this:

// Segmented contract: an envelope pairing shared metadata with a
// polymorphic payload (deserialization needs a type hint for Data).
public class CommandMessage
{
    public Metadata Meta {get; set;}
    public ICommand Data {get; set;}
}
public class Metadata
{
    public Guid MessageId {get; set;}
    public string TargetId {get; set;}
    public int Version {get; set;}
}
// Marker interface so the envelope can carry any command type.
public interface ICommand { }
public class UseCase1 : ICommand
{
    public string SomeData {get; set;}
}
I have the data and metadata separated. However, this doesn’t quite sit right. Message construction on the client now deals with 2 objects. Ok, not a big deal... You also have to make sure that you can deserialize to the proper type. This involves a type hint as part of the data the client sends. No problem I guess…

But eventually it hit me that my compiled object obsession was missing one of the biggest advantages of the JSON format. It’s something I first heard on the DDD/CQRS groups called weak serialization. And once you see it, it’s so obvious. So let me boldly state the obvious with code.

// Weak serialization: metadata and use-case data are separate classes,
// but both are deserialized from the SAME flat JSON document.
public class Metadata
{
    public Guid MessageId {get; set;}
    public string TargetId {get; set;}
    public int Version {get; set;}
}
public class UseCase1
{
    public string UseCaseData1 {get; set;}
    public int UseCaseData2 {get; set;}
}
JSON sent from client
{
    MessageId: "...",
    TargetId: "asdf-1",
    Version: 0,
    UseCaseData1: "asdf",
    UseCaseData2: 7
}
And the data turned into server objects
// Deserialize the same JSON body twice: once into the metadata view and
// once into the use-case command (JSON.NET ignores unmatched properties).
// Fixes: JSON.NET's method is DeserializeObject<T>, not Deserialize<T>;
// type name matched to the declared Metadata class; stray space removed.
var meta = JsonConvert
    .DeserializeObject<Metadata>(requestBodyString);
var command = JsonConvert.DeserializeObject<UseCase1>(requestBodyString);
Yes, duh.

Lesson: don’t treat the JSON message as an “object”. Treat it as a data container (dictionary) which could represent multiple objects. This also frees me to add arbitrary meta information to any message without affecting other parts of the system… naming conflicts notwithstanding.