LEFT | RIGHT |
---|---|
1 # coding: utf-8 | 1 # coding: utf-8 |
2 | 2 |
3 # This file is part of the Adblock Plus web scripts, | 3 # This file is part of the Adblock Plus web scripts, |
4 # Copyright (C) 2006-2015 Eyeo GmbH | 4 # Copyright (C) 2006-2015 Eyeo GmbH |
5 # | 5 # |
6 # Adblock Plus is free software: you can redistribute it and/or modify | 6 # Adblock Plus is free software: you can redistribute it and/or modify |
7 # it under the terms of the GNU General Public License version 3 as | 7 # it under the terms of the GNU General Public License version 3 as |
8 # published by the Free Software Foundation. | 8 # published by the Free Software Foundation. |
9 # | 9 # |
10 # Adblock Plus is distributed in the hope that it will be useful, | 10 # Adblock Plus is distributed in the hope that it will be useful, |
(...skipping 23 matching lines...)
34 logger = logging.getLogger("cms.bin.translate") | 34 logger = logging.getLogger("cms.bin.translate") |
35 | 35 |
36 class CrowdinAPI: | 36 class CrowdinAPI: |
37 FILES_PER_REQUEST = 20 | 37 FILES_PER_REQUEST = 20 |
38 | 38 |
39 def __init__(self, api_key, project_name): | 39 def __init__(self, api_key, project_name): |
40 self.api_key = api_key | 40 self.api_key = api_key |
41 self.project_name = project_name | 41 self.project_name = project_name |
42 self.connection = urllib3.connection_from_url("https://api.crowdin.com/") | 42 self.connection = urllib3.connection_from_url("https://api.crowdin.com/") |
43 | 43 |
44 def raw_request(self, request_method, api_endpoint, query_params, **kwargs): | 44 def raw_request(self, request_method, api_endpoint, query_params=(), **kwargs): |
45 url = "/api/project/%s/%s?%s" % ( | 45 url = "/api/project/%s/%s?%s" % ( |
46 urllib.quote(self.project_name), | 46 urllib.quote(self.project_name), |
47 urllib.quote(api_endpoint), | 47 urllib.quote(api_endpoint), |
48 urllib.urlencode([("key", self.api_key)] + query_params) | 48 urllib.urlencode((("key", self.api_key),) + query_params) |
49 ) | 49 ) |
50 try: | 50 try: |
51 response = self.connection.request( | 51 response = self.connection.request( |
52 request_method, str(url), **kwargs | 52 request_method, str(url), **kwargs |
Sebastian Noack
2015/07/15 10:31:03
Converting url to an str object seems to be unnecessary.
kzar
2015/07/15 11:09:03
For some reason the URL was ending up being unicode.
Sebastian Noack
2015/07/15 11:27:11
So far for urllib3. This is certainly a silly behavior.
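(Aside, not part of the review: a minimal sketch of the Python 2 string coercion that can make the formatted URL come out as unicode, which seems to be what the thread above is about. The project_name value is made up; that urllib3 needed a byte string here is taken from the discussion, not verified.)

    # Python 2: if any operand of % is unicode, the whole result is unicode.
    project_name = u"adblockplus"               # hypothetical unicode value
    url = "/api/project/%s/info" % project_name
    print type(url)        # <type 'unicode'>

    # str() on an ASCII-only unicode string gives back a plain byte string,
    # which is apparently what urllib3 expected according to the thread above.
    print type(str(url))   # <type 'str'>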
53 ) | 53 ) |
54 | |
55 if response.status < 200 or response.status >= 300: | |
56 raise urllib3.exceptions.HTTPError(response.status) | |
57 | |
58 return response | |
Sebastian Noack
2015/07/15 10:31:03
Nit: The return doesn't need to be in the try..catch block.
kzar
2015/07/15 11:09:03
Done.
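(Aside: a sketch of the structure the nit asks for, which is what the new version on the right does; only the call that can raise sits inside try, and the status check plus return follow it. fetch, connection and logger are hypothetical stand-ins, not names from the patch.)

    import logging
    import urllib3

    logger = logging.getLogger(__name__)

    def fetch(connection, method, url):
        # Only the network call that can actually raise lives in the try block.
        try:
            response = connection.request(method, url)
        except urllib3.exceptions.HTTPError:
            logger.error("Connection to %s failed", url)
            raise
        # The status check and the return sit outside the try block.
        if response.status < 200 or response.status >= 300:
            raise urllib3.exceptions.HTTPError(response.status)
        return response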
59 except urllib3.exceptions.HTTPError: | 54 except urllib3.exceptions.HTTPError: |
55 logger.error("Connection to API endpoint %s failed", url) | |
56 raise | |
57 if response.status < 200 or response.status >= 300: | |
60 logger.error("API call to %s failed:\n%s", url, response.data) | 58 logger.error("API call to %s failed:\n%s", url, response.data) |
Sebastian Noack
2015/07/15 10:31:03
If self.connect.request() fails, "response" wouldn't be defined.
kzar
2015/07/15 11:09:03
Whoops, Done.
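(Aside: a standalone sketch of the failure mode being pointed out above, i.e. an except clause referencing a name that is only bound inside the try. do_request and broken are made-up names for illustration.)

    def do_request():
        # Stand-in for self.connection.request(); always fails for the demo.
        raise RuntimeError("connection failed")

    def broken():
        try:
            response = do_request()
        except RuntimeError:
            # "response" was never assigned because do_request() raised before
            # the assignment completed, so this line fails on its own.
            print response.data

    broken()   # -> UnboundLocalError: local variable 'response' referenced before assignment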
61 raise | 59 raise urllib3.exceptions.HTTPError(response.status) |
| 60 return response |
62 | 61 |
63 def request(self, request_method, api_endpoint, data=None, files=None): | 62 def request(self, request_method, api_endpoint, data=None, files=None): |
64 fields = [] | 63 fields = [] |
65 if data: | 64 if data: |
66 for name, value in data.iteritems(): | 65 for name, value in data.iteritems(): |
67 if isinstance(value, basestring): | 66 if isinstance(value, basestring): |
68 fields.append((name, value)) | 67 fields.append((name, value)) |
69 else: | 68 else: |
70 fields.extend((name + "[]", v) for v in value) | 69 fields.extend((name + "[]", v) for v in value) |
71 if files: | 70 if files: |
72 fields.extend(("files[%s]" % f[0], f) for f in files) | 71 fields.extend(("files[%s]" % f[0], f) for f in files) |
73 | 72 |
74 response = self.raw_request( | 73 response = self.raw_request( |
75 request_method, api_endpoint, [("json", "1")], | 74 request_method, api_endpoint, (("json", "1"),), |
76 fields=fields, preload_content=False | 75 fields=fields, preload_content=False |
77 ) | 76 ) |
78 | 77 |
79 try: | 78 try: |
80 return json.load(response) | 79 return json.load(response) |
81 except ValueError: | 80 except ValueError: |
82 logger.error("Invalid response returned by API endpoint %s", url) | 81 logger.error("Invalid response returned by API endpoint %s", url) |
83 raise | 82 raise |
84 | 83 |
85 | 84 |
(...skipping 132 matching lines...)
218 crowdin_api.request("POST", "delete-directory", data={"name": directory}) | 217 crowdin_api.request("POST", "delete-directory", data={"name": directory}) |
219 | 218 |
220 def download_translations(crowdin_api, source_dir, required_locales): | 219 def download_translations(crowdin_api, source_dir, required_locales): |
221 logger.info("Requesting generation of fresh translations archive...") | 220 logger.info("Requesting generation of fresh translations archive...") |
222 result = crowdin_api.request("GET", "export") | 221 result = crowdin_api.request("GET", "export") |
223 if result.get("success", {}).get("status") == "skipped": | 222 if result.get("success", {}).get("status") == "skipped": |
224 logger.warning("Archive generation skipped, either " | 223 logger.warning("Archive generation skipped, either " |
225 "no changes or API usage excessive") | 224 "no changes or API usage excessive") |
226 | 225 |
227 logger.info("Downloading translations archive...") | 226 logger.info("Downloading translations archive...") |
228 response = crowdin_api.raw_request("GET", "download/all.zip", []) | 227 response = crowdin_api.raw_request("GET", "download/all.zip") |
229 | 228 |
230 logger.info("Extracting translations archive...") | 229 logger.info("Extracting translations archive...") |
231 with zipfile.ZipFile(io.BytesIO(response.data), "r") as archive: | 230 with zipfile.ZipFile(io.BytesIO(response.data), "r") as archive: |
232 locale_path = os.path.join(source_dir, "locales") | 231 locale_path = os.path.join(source_dir, "locales") |
233 # First clear existing translation files | 232 # First clear existing translation files |
234 for root, dirs, files in os.walk(locale_path, topdown=True): | 233 for root, dirs, files in os.walk(locale_path, topdown=True): |
235 if root == locale_path: | 234 if root == locale_path: |
236 dirs[:] = [d for d in dirs if d in required_locales] | 235 dirs[:] = [d for d in dirs if d in required_locales] |
237 for f in files: | 236 for f in files: |
238 if f.lower().endswith(".json"): | 237 if f.lower().endswith(".json"): |
(...skipping 53 matching lines...)
292 if __name__ == "__main__": | 291 if __name__ == "__main__": |
293 if len(sys.argv) < 3: | 292 if len(sys.argv) < 3: |
294 print >>sys.stderr, "Usage: python -m cms.bin.translate www_directory crowdin_project_api_key [logging_level]" | 293 print >>sys.stderr, "Usage: python -m cms.bin.translate www_directory crowdin_project_api_key [logging_level]" |
295 sys.exit(1) | 294 sys.exit(1) |
296 | 295 |
297 logging.basicConfig() | 296 logging.basicConfig() |
298 logger.setLevel(sys.argv[3] if len(sys.argv) > 3 else logging.INFO) | 297 logger.setLevel(sys.argv[3] if len(sys.argv) > 3 else logging.INFO) |
299 | 298 |
300 source_dir, crowdin_api_key = sys.argv[1:3] | 299 source_dir, crowdin_api_key = sys.argv[1:3] |
301 crowdin_sync(source_dir, crowdin_api_key) | 300 crowdin_sync(source_dir, crowdin_api_key) |