/*
- * Copyright (C) 2011-2012 jeanfi@gmail.com
+ * Copyright (C) 2011-2014 jeanfi@gmail.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
#include <libintl.h>
#define _(String) gettext(String)
+#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <unistd.h>
-#include <curl/curl.h>
-#include <json/json.h>
+#include <json.h>
#include "cache.h"
+#include "fcache.h"
+#include "http.h"
#include "list.h"
#include "log.h"
#include "lp_ws.h"
#include "lp_json.h"
#include "ppastats.h"
/* Default 'ws.size' value used for the getPublishedBinaries request
 * when the caller supplies an out-of-range size. */
static const int DEFAULT_WS_SIZE = 150;

/* Launchpad web-service operation query strings. */
static const char *QUERY_GET_DOWNLOAD_COUNT = "?ws.op=getDownloadCount";
static const char *QUERY_GET_DAILY_DOWNLOAD_TOTALS =
	"?ws.op=getDailyDownloadTotals";
-static size_t cbk_curl(void *buffer, size_t size, size_t nmemb, void *userp)
+static json_object *get_json_object(const char *url)
{
- size_t realsize = size * nmemb;
- struct ucontent *mem = (struct ucontent *)userp;
+ json_object *obj = NULL;
+ char *body;
- mem->data = realloc(mem->data, mem->len + realsize + 1);
+ body = get_url_content(url, 0);
- memcpy(&(mem->data[mem->len]), buffer, realsize);
- mem->len += realsize;
- mem->data[mem->len] = 0;
+ if (body) {
+ obj = json_tokener_parse(body);
- return realsize;
-}
+ free(body);
-static void init()
-{
- if (!curl) {
- log_debug(_("initializing CURL"));
- curl_global_init(CURL_GLOBAL_ALL);
- curl = curl_easy_init();
+ return obj;
}
- if (!curl)
- exit(EXIT_FAILURE);
+ return NULL;
}
/*
 * Builds the file-cache key under which the binary package publishing
 * history of an archive is stored.
 *
 * 'archive_url' + 7 skips the URL scheme prefix so the key is a
 * filesystem-friendly path (assumes the URL begins with a 7-char
 * prefix such as "http://" -- NOTE(review): confirm for https URLs,
 * whose prefix is 8 chars).
 *
 * Returns a heap-allocated string owned by the caller, or NULL if
 * memory allocation fails (the original passed an unchecked malloc
 * result straight to sprintf).
 */
static char *get_bpph_list_cache_key(const char *archive_url)
{
	static const char suffix[] = "/bpph";
	const char *path = archive_url + 7;
	size_t n;
	char *key;

	n = strlen(path) + sizeof(suffix); /* sizeof includes the NUL */
	key = malloc(n);
	if (!key)
		return NULL;

	snprintf(key, n, "%s%s", path, suffix);

	return key;
}
+static struct bpph **get_bpph_list_from_cache(const char *key)
+{
+ char *content;
+ struct bpph **list;
+ json_object *json;
- retries = DEFAULT_FETCH_RETRIES;
+ content = fcache_get(key);
+ if (!content)
+ return NULL;
- retrieve:
- content->data = malloc(1);
- content->data[0] = '\0';
- content->len = 0;
+ json = json_tokener_parse(content);
+ if (!json)
+ return NULL;
- curl_easy_setopt(curl, CURLOPT_URL, url);
- curl_easy_setopt(curl, CURLOPT_VERBOSE, 0);
- curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, cbk_curl);
- curl_easy_setopt(curl, CURLOPT_WRITEDATA, content);
- curl_easy_setopt(curl, CURLOPT_USERAGENT, "ppastats/0.0");
+ list = json_object_to_bpph_list(json);
- if (curl_easy_perform(curl) == CURLE_OK) {
- curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &code);
+ json_object_put(json);
+ free(content);
- switch (code) {
- case 200:
- result = content->data;
- break;
- case 500:
- case 502:
- case 503:
- case 504:
- log_err(_("Fetch failed with code %ld for URL %s"),
- code,
- url);
-
- if (retries) {
- log_debug(_("Wait 5s before retry"));
- sleep(5);
-
- free(content->data);
- retries--;
- goto retrieve;
- }
+ return list;
+}
- break;
- default:
- log_err(_("Fetch failed with code %ld for URL %s"),
- code,
- url);
- }
- }
+static char *get_last_creation_date(struct bpph **list)
+{
+ time_t last, t;
+ struct bpph **cur;
- if (!result)
- free(content->data);
+ last = 0;
- free(content);
+ if (list)
+ for (cur = list; *cur; cur++) {
+ t = (*cur)->date_created;
+ if (t > last)
+ last = t;
+ }
- return result;
+ if (last)
+ return time_to_str(last);
+ else
+ return NULL;
}
-static json_object *get_json_object(const char *url)
+/*
+ * 'archive_url': LP URL of the archive.
+ * 'size': size of the reply array. Between 1-300, else default value is used.
+ */
+static char *create_query_get_bpph(const char *archive_url,
+ const char *status,
+ int size)
{
- json_object *obj = NULL;
- char *body;
+ static const char *default_opt = "?ws.op=getPublishedBinaries&ws.size=";
+ static const char *status_opt = "&status=";
+ char *url;
+ size_t n;
- body = fetch_url(url);
+ if (size < 1 || size > 300)
+ size = DEFAULT_WS_SIZE;
- if (body) {
- obj = json_tokener_parse(body);
+ n = strlen(archive_url) + strlen(default_opt) + 3 + 1;
- free(body);
+ if (status)
+ n += strlen(status_opt) + strlen(status);
- return obj;
+ url = malloc(n);
+ sprintf(url, "%s%s%d", archive_url, default_opt, size);
+
+ if (status) {
+ strcat(url, status_opt);
+ strcat(url, status);
}
- return NULL;
+ return url;
}
/*
 * Retrieves the binary package publishing history of an archive.
 *
 * 'archive_url': LP URL of the archive.
 * 'pkg_status': package status filter, or NULL.
 * 'ws_size': reply page size, see create_query_get_bpph().
 *
 * The result is seeded from the file cache; when cached entries
 * exist, only packages created since the newest cached entry are
 * requested. On success the merged list is written back to the
 * cache. Returns a NULL-terminated bpph list owned by the caller.
 */
struct bpph **get_bpph_list(const char *archive_url,
			    const char *pkg_status,
			    int ws_size)
{
	char *url, *key, *tmp, *date;
	struct bpph **result;
	struct json_object *o, *bpph_json, *o_next;
	int ok;

	url = create_query_get_bpph(archive_url, pkg_status, ws_size);
	key = get_bpph_list_cache_key(archive_url);

	result = get_bpph_list_from_cache(key);

	if (result) {
		date = get_last_creation_date(result);

		if (date) {
			printf("Update package since: %s\n", date);

			tmp = malloc(strlen(url)
				     + strlen("&created_since_date=")
				     + strlen(date) + 1);
			strcpy(tmp, url);
			strcat(tmp, "&created_since_date=");
			strcat(tmp, date);

			free(url);
			url = tmp;

			free(date);
		}
	}

	ok = 1;
	while (url) {
		o = get_json_object(url);
		free(url);
		url = NULL;

		if (!o) {
			ok = 0;
			break;
		}

		result = bpph_list_append_list(result,
					       json_object_to_bpph_list(o));

		/* Follow the paginated collection: the original fetched
		 * 'next_collection_link' but never advanced 'url', so
		 * only the first page was ever retrieved. */
		o_next = json_object_object_get(o, "next_collection_link");
		if (o_next)
			url = strdup(json_object_get_string(o_next));

		json_object_put(o);
	}

	if (ok) {
		bpph_json = bpph_list_to_json(result);
		fcache_put(key, json_object_to_json_string(bpph_json));
		json_object_put(bpph_json);
	}

	free(key);

	return result;
}
int get_download_count(const char *archive_url)
{
json_object *obj;
const struct distro_arch_series *distro;
+ char *content;
distro = cache_get(url);
if (distro)
return (struct distro_arch_series *)distro;
- obj = get_json_object(url);
+ content = get_url_content(url, 1);
+
+ if (!content)
+ return NULL;
+
+ obj = json_tokener_parse(content);
+
+ free(content);
if (!obj)
return NULL;
{
json_object *obj;
const struct distro_series *distro;
+ char *content;
distro = cache_get(url);
if (distro)
return (struct distro_series *)distro;
- obj = get_json_object(url);
+ content = get_url_content(url, 1);
+
+ if (!content)
+ return NULL;
+
+ obj = json_tokener_parse(content);
+
+ free(content);
if (!obj)
return NULL;
return result;
}
-void lp_ws_cleanup()
-{
- log_debug(_("cleanup CURL"));
-
- curl_easy_cleanup(curl);
- curl_global_cleanup();
-}