/* mcap.c — MCAP (Multi-Channel Adaptation Protocol) implementation */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. *
  4. * BlueZ - Bluetooth protocol stack for Linux
  5. *
  6. * Copyright (C) 2010 GSyC/LibreSoft, Universidad Rey Juan Carlos.
  7. * Copyright (C) 2010 Signove
  8. * Copyright (C) 2014 Intel Corporation. All rights reserved.
  9. *
  10. */
  11. #ifdef HAVE_CONFIG_H
  12. #include <config.h>
  13. #endif
  14. #define _GNU_SOURCE
  15. #include <netinet/in.h>
  16. #include <stdlib.h>
  17. #include <errno.h>
  18. #include <unistd.h>
  19. #include <time.h>
  20. #include <glib.h>
  21. #include "lib/bluetooth.h"
  22. #include "bluetooth/l2cap.h"
  23. #include "btio/btio.h"
  24. #include "src/log.h"
  25. #include "src/shared/timeout.h"
  26. #include "mcap.h"
  27. #define MCAP_BTCLOCK_HALF (MCAP_BTCLOCK_FIELD / 2)
  28. #define CLK CLOCK_MONOTONIC
  29. #define MCAP_CSP_ERROR g_quark_from_static_string("mcap-csp-error-quark")
  30. #define MAX_RETRIES 10
  31. #define SAMPLE_COUNT 20
  32. #define RESPONSE_TIMER 6 /* seconds */
  33. #define MAX_CACHED 10 /* 10 devices */
  34. #define MCAP_ERROR g_quark_from_static_string("mcap-error-quark")
/*
 * Cancel and clear the MCL's pending-response timer, if armed.
 * Multi-statement macro wrapped in do/while(0) for safe use in if/else.
 */
#define RELEASE_TIMER(__mcl) do { \
	if (__mcl->tid) { \
		timeout_remove(__mcl->tid); \
		__mcl->tid = 0; \
	} \
} while(0)
/* Per-MCL Clock Synchronization Protocol (CSP) state. "Cent." fields are
 * used when acting as CSP central, "Perip" fields when acting as
 * peripheral. */
struct mcap_csp {
	uint64_t base_tmstamp;		/* CSP base timestamp */
	struct timespec base_time;	/* CSP base time when timestamp set */
	guint local_caps;		/* CSP-Cent.: have got remote caps */
	guint remote_caps;		/* CSP-Perip: remote central got caps */
	guint rem_req_acc;		/* CSP-Perip: accuracy req by central */
	guint ind_expected;		/* CSP-Cent.: indication expected */
	uint8_t csp_req;		/* CSP-Cent.: Request control flag */
	guint ind_timer;		/* CSP-Perip: indication timer */
	guint set_timer;		/* CSP-Perip: delayed set timer */
	void *set_data;			/* CSP-Perip: delayed set data */
	void *csp_priv_data;		/* CSP-Cent.: In-flight request data */
};
/* Bundles a CSP capabilities-request callback with its user data */
struct mcap_sync_cap_cbdata {
	mcap_sync_cap_cb cb;	/* callback for the sync-capabilities reply */
	gpointer user_data;	/* opaque data passed back to cb */
};
/* Bundles a CSP set-request callback with its user data */
struct mcap_sync_set_cbdata {
	mcap_sync_set_cb cb;	/* callback for the sync-set reply */
	gpointer user_data;	/* opaque data passed back to cb */
};
/* Local CSP timing capabilities — presumably measured once and cached
 * (see csp_caps_initialized); confirm against the initialisation code */
struct csp_caps {
	int ts_acc;		/* timestamp accuracy */
	int ts_res;		/* timestamp resolution */
	int latency;		/* Read BT clock latency */
	int preempt_thresh;	/* Preemption threshold for latency */
	int syncleadtime_ms;	/* SyncLeadTime in ms */
};
/* Parameters of a deferred CSP Set operation (peripheral side) */
struct sync_set_data {
	uint8_t update;		/* update flag from the Set request */
	uint32_t sched_btclock;	/* BT clock at which the set is scheduled */
	uint64_t timestamp;	/* timestamp value to apply */
	int ind_freq;		/* indication frequency */
	gboolean role;		/* role flag — semantics defined by CSP code */
};
/* Context carried through an asynchronous MCL connection attempt */
struct connect_mcl {
	struct mcap_mcl *mcl;		/* MCL for this operation */
	mcap_mcl_connect_cb connect_cb;	/* Connect callback */
	GDestroyNotify destroy;		/* Destroy callback */
	gpointer user_data;		/* Callback user data */
};
/* One slot able to hold any of the three MDL-operation callback shapes;
 * which member is valid depends on the opcode of the pending request */
typedef union {
	mcap_mdl_operation_cb op;		/* e.g. reconnect */
	mcap_mdl_operation_conf_cb op_conf;	/* e.g. create (with config) */
	mcap_mdl_notify_cb notify;		/* e.g. delete/abort */
} mcap_cb_type;
/* Context of the single in-flight MDL operation on an MCL
 * (stored in mcl->priv_data while a request awaits its response) */
struct mcap_mdl_op_cb {
	struct mcap_mdl *mdl;		/* MDL for this operation */
	mcap_cb_type cb;		/* Operation callback */
	GDestroyNotify destroy;		/* Destroy callback */
	gpointer user_data;		/* Callback user data */
};
  93. /* MCAP finite state machine functions */
  94. static void proc_req_connected(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t l);
  95. static void proc_req_pending(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t l);
  96. static void proc_req_active(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t l);
  97. static void (*proc_req[])(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len) = {
  98. proc_req_connected,
  99. proc_req_pending,
  100. proc_req_active
  101. };
  102. static gboolean csp_caps_initialized = FALSE;
  103. struct csp_caps _caps;
  104. static void mcap_cache_mcl(struct mcap_mcl *mcl);
/* Placeholder used when the app installs no mdl_connected callback */
static void default_mdl_connected_cb(struct mcap_mdl *mdl, gpointer data)
{
	DBG("MCAP Unmanaged mdl connection");
}
/* Placeholder used when the app installs no mdl_closed callback */
static void default_mdl_closed_cb(struct mcap_mdl *mdl, gpointer data)
{
	DBG("MCAP Unmanaged mdl closed");
}
/* Placeholder used when the app installs no mdl_deleted callback */
static void default_mdl_deleted_cb(struct mcap_mdl *mdl, gpointer data)
{
	DBG("MCAP Unmanaged mdl deleted");
}
/* Placeholder used when the app installs no mdl_aborted callback */
static void default_mdl_aborted_cb(struct mcap_mdl *mdl, gpointer data)
{
	DBG("MCAP Unmanaged mdl aborted");
}
/* Placeholder connection-request handler: with no app callback installed
 * every remote MDL connection request is rejected */
static uint8_t default_mdl_conn_req_cb(struct mcap_mcl *mcl,
						uint8_t mdepid, uint16_t mdlid,
						uint8_t *conf, gpointer data)
{
	DBG("MCAP mdl remote connection aborted");
	/* This callback is not managed, so the request is not supported */
	return MCAP_REQUEST_NOT_SUPPORTED;
}
/* Placeholder reconnection-request handler: with no app callback installed
 * every remote MDL reconnection request is rejected */
static uint8_t default_mdl_reconn_req_cb(struct mcap_mdl *mdl,
						gpointer data)
{
	DBG("MCAP mdl remote reconnection aborted");
	/* This callback is not managed, so the request is not supported */
	return MCAP_REQUEST_NOT_SUPPORTED;
}
  136. static void set_default_cb(struct mcap_mcl *mcl)
  137. {
  138. if (!mcl->cb)
  139. mcl->cb = g_new0(struct mcap_mdl_cb, 1);
  140. mcl->cb->mdl_connected = default_mdl_connected_cb;
  141. mcl->cb->mdl_closed = default_mdl_closed_cb;
  142. mcl->cb->mdl_deleted = default_mdl_deleted_cb;
  143. mcl->cb->mdl_aborted = default_mdl_aborted_cb;
  144. mcl->cb->mdl_conn_req = default_mdl_conn_req_cb;
  145. mcl->cb->mdl_reconn_req = default_mdl_reconn_req_cb;
  146. }
  147. static char *error2str(uint8_t rc)
  148. {
  149. switch (rc) {
  150. case MCAP_SUCCESS:
  151. return "Success";
  152. case MCAP_INVALID_OP_CODE:
  153. return "Invalid Op Code";
  154. case MCAP_INVALID_PARAM_VALUE:
  155. return "Invalid Parameter Value";
  156. case MCAP_INVALID_MDEP:
  157. return "Invalid MDEP";
  158. case MCAP_MDEP_BUSY:
  159. return "MDEP Busy";
  160. case MCAP_INVALID_MDL:
  161. return "Invalid MDL";
  162. case MCAP_MDL_BUSY:
  163. return "MDL Busy";
  164. case MCAP_INVALID_OPERATION:
  165. return "Invalid Operation";
  166. case MCAP_RESOURCE_UNAVAILABLE:
  167. return "Resource Unavailable";
  168. case MCAP_UNSPECIFIED_ERROR:
  169. return "Unspecified Error";
  170. case MCAP_REQUEST_NOT_SUPPORTED:
  171. return "Request Not Supported";
  172. case MCAP_CONFIGURATION_REJECTED:
  173. return "Configuration Rejected";
  174. default:
  175. return "Unknown Response Code";
  176. }
  177. }
/*
 * Send a standard-opcode MCAP command over the MCL control channel.
 *
 * The checks are ordered deliberately so the most relevant error is
 * reported first: MCL not connected, another request already pending,
 * remote without standard-opcode support, MCL still in PENDING state.
 *
 * On success, ownership of @cmd passes to the MCL (kept in mcl->lcmd for
 * response matching) and the MCL enters MCL_WAITING_RSP.
 *
 * Returns TRUE if the command was written, FALSE with @err set otherwise.
 */
static gboolean mcap_send_std_opcode(struct mcap_mcl *mcl, void *cmd,
						uint32_t size, GError **err)
{
	if (mcl->state == MCL_IDLE) {
		g_set_error(err, MCAP_ERROR, MCAP_ERROR_FAILED,
						"MCL is not connected");
		return FALSE;
	}

	if (mcl->req != MCL_AVAILABLE) {
		g_set_error(err, MCAP_ERROR, MCAP_ERROR_RESOURCE_UNAVAILABLE,
						"Pending request");
		return FALSE;
	}

	if (!(mcl->ctrl & MCAP_CTRL_STD_OP)) {
		g_set_error(err, MCAP_ERROR, MCAP_ERROR_REQUEST_NOT_SUPPORTED,
				"Remote does not support standard opcodes");
		return FALSE;
	}

	if (mcl->state == MCL_PENDING) {
		g_set_error(err, MCAP_ERROR, MCAP_ERROR_INVALID_OPERATION,
				"Not Std Op. Codes can be sent in PENDING State");
		return FALSE;
	}

	if (mcap_send_data(g_io_channel_unix_get_fd(mcl->cc), cmd, size) < 0) {
		g_set_error(err, MCAP_ERROR, MCAP_ERROR_FAILED,
					"Command can't be sent, write error");
		return FALSE;
	}

	/* Keep the command for response matching; block further requests */
	mcl->lcmd = cmd;
	mcl->req = MCL_WAITING_RSP;

	return TRUE;
}
  210. static void update_mcl_state(struct mcap_mcl *mcl)
  211. {
  212. GSList *l;
  213. struct mcap_mdl *mdl;
  214. if (mcl->state == MCL_PENDING)
  215. return;
  216. for (l = mcl->mdls; l; l = l->next) {
  217. mdl = l->data;
  218. if (mdl->state == MDL_CONNECTED) {
  219. mcl->state = MCL_ACTIVE;
  220. return;
  221. }
  222. }
  223. mcl->state = MCL_CONNECTED;
  224. }
  225. static void shutdown_mdl(struct mcap_mdl *mdl)
  226. {
  227. mdl->state = MDL_CLOSED;
  228. if (mdl->wid) {
  229. g_source_remove(mdl->wid);
  230. mdl->wid = 0;
  231. }
  232. if (mdl->dc) {
  233. g_io_channel_shutdown(mdl->dc, TRUE, NULL);
  234. g_io_channel_unref(mdl->dc);
  235. mdl->dc = NULL;
  236. }
  237. }
  238. static void free_mdl(struct mcap_mdl *mdl)
  239. {
  240. if (!mdl)
  241. return;
  242. mcap_mcl_unref(mdl->mcl);
  243. g_free(mdl);
  244. }
  245. static int cmp_mdl_state(gconstpointer a, gconstpointer b)
  246. {
  247. const struct mcap_mdl *mdl = a;
  248. const MDLState *st = b;
  249. if (mdl->state == *st)
  250. return 0;
  251. else if (mdl->state < *st)
  252. return -1;
  253. else
  254. return 1;
  255. }
  256. static void free_mcap_mdl_op(struct mcap_mdl_op_cb *op)
  257. {
  258. if (op->destroy)
  259. op->destroy(op->user_data);
  260. if (op->mdl)
  261. mcap_mdl_unref(op->mdl);
  262. g_free(op);
  263. }
  264. static void free_mcl_priv_data(struct mcap_mcl *mcl)
  265. {
  266. free_mcap_mdl_op(mcl->priv_data);
  267. mcl->priv_data = NULL;
  268. }
/*
 * Report @err to the callback of the request currently in flight on this
 * MCL and roll local MDL state back to match the failure.  Dispatch is
 * keyed on the opcode of the last command sent (mcl->lcmd[0]).
 *
 * NOTE(review): the CREATE/ABORT/RECONNECT cases dereference the result
 * of g_slist_find_custom() without a NULL check — they assume an MDL in
 * MDL_WAITING state is always present when such a request is pending;
 * confirm all callers uphold that invariant.
 */
static void mcap_notify_error(struct mcap_mcl *mcl, GError *err)
{
	struct mcap_mdl_op_cb *con = mcl->priv_data;
	struct mcap_mdl *mdl;
	MDLState st;
	GSList *l;

	/* Nothing to report without a pending operation and its command */
	if (!con || !mcl->lcmd)
		return;

	switch (mcl->lcmd[0]) {
	case MCAP_MD_CREATE_MDL_REQ:
		/* Drop the half-created WAITING MDL, then notify */
		st = MDL_WAITING;
		l = g_slist_find_custom(mcl->mdls, &st, cmp_mdl_state);
		mdl = l->data;
		mcl->mdls = g_slist_remove(mcl->mdls, mdl);
		mcap_mdl_unref(mdl);
		update_mcl_state(mcl);
		con->cb.op_conf(NULL, 0, err, con->user_data);
		break;
	case MCAP_MD_ABORT_MDL_REQ:
		/* Close the WAITING MDL the abort was for, then notify */
		st = MDL_WAITING;
		l = g_slist_find_custom(mcl->mdls, &st, cmp_mdl_state);
		shutdown_mdl(l->data);
		update_mcl_state(mcl);
		con->cb.notify(err, con->user_data);
		break;
	case MCAP_MD_DELETE_MDL_REQ:
		/* Revert DELETING MDLs to their pre-request state */
		for (l = mcl->mdls; l; l = l->next) {
			mdl = l->data;
			if (mdl->state == MDL_DELETING)
				mdl->state = (mdl->dc) ? MDL_CONNECTED :
								MDL_CLOSED;
		}
		update_mcl_state(mcl);
		con->cb.notify(err, con->user_data);
		break;
	case MCAP_MD_RECONNECT_MDL_REQ:
		/* Close the WAITING MDL that failed to reconnect */
		st = MDL_WAITING;
		l = g_slist_find_custom(mcl->mdls, &st, cmp_mdl_state);
		shutdown_mdl(l->data);
		update_mcl_state(mcl);
		con->cb.op(NULL, err, con->user_data);
		break;
	}

	/* The operation is over: release its context and the last command */
	free_mcl_priv_data(mcl);
	g_free(mcl->lcmd);
	mcl->lcmd = NULL;
}
  316. int mcap_send_data(int sock, const void *buf, uint32_t size)
  317. {
  318. const uint8_t *buf_b = buf;
  319. uint32_t sent = 0;
  320. while (sent < size) {
  321. int n = write(sock, buf_b + sent, size - sent);
  322. if (n < 0)
  323. return -1;
  324. sent += n;
  325. }
  326. return 0;
  327. }
  328. static int mcap_send_cmd(struct mcap_mcl *mcl, uint8_t oc, uint8_t rc,
  329. uint16_t mdl, uint8_t *data, size_t len)
  330. {
  331. mcap_rsp *cmd;
  332. int sock, sent;
  333. if (mcl->cc == NULL)
  334. return -1;
  335. sock = g_io_channel_unix_get_fd(mcl->cc);
  336. cmd = g_malloc(sizeof(mcap_rsp) + len);
  337. cmd->op = oc;
  338. cmd->rc = rc;
  339. cmd->mdl = htons(mdl);
  340. if (data && len > 0)
  341. memcpy(cmd->data, data, len);
  342. sent = mcap_send_data(sock, cmd, sizeof(mcap_rsp) + len);
  343. g_free(cmd);
  344. return sent;
  345. }
  346. static struct mcap_mdl *get_mdl(struct mcap_mcl *mcl, uint16_t mdlid)
  347. {
  348. GSList *l;
  349. struct mcap_mdl *mdl;
  350. for (l = mcl->mdls; l; l = l->next) {
  351. mdl = l->data;
  352. if (mdlid == mdl->mdlid)
  353. return mdl;
  354. }
  355. return NULL;
  356. }
/*
 * Allocate the next unused MDL id on this MCL.
 *
 * Scans from mcl->next_mdl, wrapping from MCAP_MDLID_FINAL back to 1,
 * until a free id is found or the search comes full circle.  On success
 * mcl->next_mdl is advanced past the returned id.
 *
 * Returns the allocated id, or 0 when every id is in use.
 */
static uint16_t generate_mdlid(struct mcap_mcl *mcl)
{
	uint16_t mdlid = mcl->next_mdl;
	struct mcap_mdl *mdl;

	do {
		mdl = get_mdl(mcl, mdlid);
		if (!mdl) {
			/* Free id found; remember where to resume next time */
			mcl->next_mdl = (mdlid % MCAP_MDLID_FINAL) + 1;
			return mdlid;
		} else
			mdlid = (mdlid % MCAP_MDLID_FINAL) + 1;
	} while (mdlid != mcl->next_mdl);

	/* No more mdlids available */
	return 0;
}
  372. static mcap_md_req *create_req(uint8_t op, uint16_t mdl_id)
  373. {
  374. mcap_md_req *req_cmd;
  375. req_cmd = g_new0(mcap_md_req, 1);
  376. req_cmd->op = op;
  377. req_cmd->mdl = htons(mdl_id);
  378. return req_cmd;
  379. }
  380. static mcap_md_create_mdl_req *create_mdl_req(uint16_t mdl_id, uint8_t mdep,
  381. uint8_t conf)
  382. {
  383. mcap_md_create_mdl_req *req_mdl;
  384. req_mdl = g_new0(mcap_md_create_mdl_req, 1);
  385. req_mdl->op = MCAP_MD_CREATE_MDL_REQ;
  386. req_mdl->mdl = htons(mdl_id);
  387. req_mdl->mdep = mdep;
  388. req_mdl->conf = conf;
  389. return req_mdl;
  390. }
  391. static int compare_mdl(gconstpointer a, gconstpointer b)
  392. {
  393. const struct mcap_mdl *mdla = a;
  394. const struct mcap_mdl *mdlb = b;
  395. if (mdla->mdlid == mdlb->mdlid)
  396. return 0;
  397. else if (mdla->mdlid < mdlb->mdlid)
  398. return -1;
  399. else
  400. return 1;
  401. }
/*
 * Response timer callback: the remote did not answer the outstanding
 * request within RESPONSE_TIMER seconds.  Fails the pending operation,
 * tells the MCL user the link is considered down, and caches the MCL.
 * Returning FALSE removes the (one-shot) timeout source.
 */
static bool wait_response_timer(gpointer data)
{
	struct mcap_mcl *mcl = data;

	GError *gerr = NULL;

	RELEASE_TIMER(mcl);

	g_set_error(&gerr, MCAP_ERROR, MCAP_ERROR_FAILED,
					"Timeout waiting response");

	mcap_notify_error(mcl, gerr);
	g_error_free(gerr);
	mcl->mi->mcl_disconnected_cb(mcl, mcl->mi->user_data);
	mcap_cache_mcl(mcl);

	return FALSE;
}
/*
 * Request creation of a new MDL on @mcl (sends MD_CREATE_MDL_REQ).
 *
 * @mdepid: remote MDEP the data channel will attach to
 * @conf: configuration byte proposed to the remote
 * @connect_cb: invoked with the result (negotiated config or error)
 *
 * A locally tracked MDL in MDL_WAITING state is inserted (sorted) into
 * mcl->mdls and a response timer is armed.
 *
 * NOTE(review): on the send-failure path only con's reference on the MDL
 * is dropped; the initial allocation appears to rely on the reference
 * counting inside mcap_mdl_ref/unref (not visible here) — confirm no
 * leak of mdl/mdl->mcl on that path.
 *
 * Returns TRUE if the request was sent, FALSE with @err set otherwise.
 */
gboolean mcap_create_mdl(struct mcap_mcl *mcl,
				uint8_t mdepid,
				uint8_t conf,
				mcap_mdl_operation_conf_cb connect_cb,
				gpointer user_data,
				GDestroyNotify destroy,
				GError **err)
{
	struct mcap_mdl *mdl;
	struct mcap_mdl_op_cb *con;
	mcap_md_create_mdl_req *cmd;
	uint16_t id;

	id = generate_mdlid(mcl);
	if (!id) {
		g_set_error(err, MCAP_ERROR, MCAP_ERROR_FAILED,
					"Not more mdlids available");
		return FALSE;
	}

	/* Track the MDL locally while the request is in flight */
	mdl = g_new0(struct mcap_mdl, 1);
	mdl->mcl = mcap_mcl_ref(mcl);
	mdl->mdlid = id;
	mdl->mdep_id = mdepid;
	mdl->state = MDL_WAITING;

	con = g_new0(struct mcap_mdl_op_cb, 1);
	con->mdl = mcap_mdl_ref(mdl);
	con->cb.op_conf = connect_cb;
	con->destroy = destroy;
	con->user_data = user_data;

	cmd = create_mdl_req(id, mdepid, conf);
	if (!mcap_send_std_opcode(mcl, cmd, sizeof(mcap_md_create_mdl_req),
									err)) {
		mcap_mdl_unref(con->mdl);
		g_free(con);
		g_free(cmd);
		return FALSE;
	}

	mcl->state = MCL_ACTIVE;
	mcl->priv_data = con;

	mcl->mdls = g_slist_insert_sorted(mcl->mdls, mcap_mdl_ref(mdl),
								compare_mdl);
	mcl->tid = timeout_add_seconds(RESPONSE_TIMER, wait_response_timer,
								mcl, NULL);
	return TRUE;
}
  459. gboolean mcap_reconnect_mdl(struct mcap_mdl *mdl,
  460. mcap_mdl_operation_cb reconnect_cb,
  461. gpointer user_data,
  462. GDestroyNotify destroy,
  463. GError **err)
  464. {
  465. struct mcap_mdl_op_cb *con;
  466. struct mcap_mcl *mcl = mdl->mcl;
  467. mcap_md_req *cmd;
  468. if (mdl->state != MDL_CLOSED) {
  469. g_set_error(err, MCAP_ERROR, MCAP_ERROR_FAILED,
  470. "MDL is not closed");
  471. return FALSE;
  472. }
  473. cmd = create_req(MCAP_MD_RECONNECT_MDL_REQ, mdl->mdlid);
  474. if (!mcap_send_std_opcode(mcl, cmd, sizeof(mcap_md_req), err)) {
  475. g_free(cmd);
  476. return FALSE;
  477. }
  478. mdl->state = MDL_WAITING;
  479. con = g_new0(struct mcap_mdl_op_cb, 1);
  480. con->mdl = mcap_mdl_ref(mdl);
  481. con->cb.op = reconnect_cb;
  482. con->destroy = destroy;
  483. con->user_data = user_data;
  484. mcl->state = MCL_ACTIVE;
  485. mcl->priv_data = con;
  486. mcl->tid = timeout_add_seconds(RESPONSE_TIMER, wait_response_timer,
  487. mcl, NULL);
  488. return TRUE;
  489. }
  490. static gboolean send_delete_req(struct mcap_mcl *mcl,
  491. struct mcap_mdl_op_cb *con,
  492. uint16_t mdlid,
  493. GError **err)
  494. {
  495. mcap_md_req *cmd;
  496. cmd = create_req(MCAP_MD_DELETE_MDL_REQ, mdlid);
  497. if (!mcap_send_std_opcode(mcl, cmd, sizeof(mcap_md_req), err)) {
  498. g_free(cmd);
  499. return FALSE;
  500. }
  501. mcl->priv_data = con;
  502. mcl->tid = timeout_add_seconds(RESPONSE_TIMER, wait_response_timer,
  503. mcl, NULL);
  504. return TRUE;
  505. }
  506. gboolean mcap_delete_all_mdls(struct mcap_mcl *mcl,
  507. mcap_mdl_notify_cb delete_cb,
  508. gpointer user_data,
  509. GDestroyNotify destroy,
  510. GError **err)
  511. {
  512. GSList *l;
  513. struct mcap_mdl *mdl;
  514. struct mcap_mdl_op_cb *con;
  515. DBG("MCL in state: %d", mcl->state);
  516. if (!mcl->mdls) {
  517. g_set_error(err, MCAP_ERROR, MCAP_ERROR_FAILED,
  518. "There are not MDLs created");
  519. return FALSE;
  520. }
  521. for (l = mcl->mdls; l; l = l->next) {
  522. mdl = l->data;
  523. if (mdl->state != MDL_WAITING)
  524. mdl->state = MDL_DELETING;
  525. }
  526. con = g_new0(struct mcap_mdl_op_cb, 1);
  527. con->mdl = NULL;
  528. con->cb.notify = delete_cb;
  529. con->destroy = destroy;
  530. con->user_data = user_data;
  531. if (!send_delete_req(mcl, con, MCAP_ALL_MDLIDS, err)) {
  532. g_free(con);
  533. return FALSE;
  534. }
  535. return TRUE;
  536. }
  537. gboolean mcap_delete_mdl(struct mcap_mdl *mdl, mcap_mdl_notify_cb delete_cb,
  538. gpointer user_data,
  539. GDestroyNotify destroy,
  540. GError **err)
  541. {
  542. struct mcap_mcl *mcl= mdl->mcl;
  543. struct mcap_mdl_op_cb *con;
  544. GSList *l;
  545. l = g_slist_find(mcl->mdls, mdl);
  546. if (!l) {
  547. g_set_error(err, MCAP_ERROR, MCAP_ERROR_INVALID_MDL,
  548. "%s" , error2str(MCAP_INVALID_MDEP));
  549. return FALSE;
  550. }
  551. if (mdl->state == MDL_WAITING) {
  552. g_set_error(err, MCAP_ERROR, MCAP_ERROR_FAILED,
  553. "Mdl is not created");
  554. return FALSE;
  555. }
  556. mdl->state = MDL_DELETING;
  557. con = g_new0(struct mcap_mdl_op_cb, 1);
  558. con->mdl = mcap_mdl_ref(mdl);
  559. con->cb.notify = delete_cb;
  560. con->destroy = destroy;
  561. con->user_data = user_data;
  562. if (!send_delete_req(mcl, con, mdl->mdlid, err)) {
  563. mcap_mdl_unref(con->mdl);
  564. g_free(con);
  565. return FALSE;
  566. }
  567. return TRUE;
  568. }
  569. gboolean mcap_mdl_abort(struct mcap_mdl *mdl, mcap_mdl_notify_cb abort_cb,
  570. gpointer user_data,
  571. GDestroyNotify destroy,
  572. GError **err)
  573. {
  574. struct mcap_mdl_op_cb *con;
  575. struct mcap_mcl *mcl = mdl->mcl;
  576. mcap_md_req *cmd;
  577. if (mdl->state != MDL_WAITING) {
  578. g_set_error(err, MCAP_ERROR, MCAP_ERROR_FAILED,
  579. "Mdl in invalid state");
  580. return FALSE;
  581. }
  582. cmd = create_req(MCAP_MD_ABORT_MDL_REQ, mdl->mdlid);
  583. if (!mcap_send_std_opcode(mcl, cmd, sizeof(mcap_md_req), err)) {
  584. g_free(cmd);
  585. return FALSE;
  586. }
  587. con = g_new0(struct mcap_mdl_op_cb, 1);
  588. con->mdl = mcap_mdl_ref(mdl);
  589. con->cb.notify = abort_cb;
  590. con->destroy = destroy;
  591. con->user_data = user_data;
  592. mcl->priv_data = con;
  593. mcl->tid = timeout_add_seconds(RESPONSE_TIMER, wait_response_timer,
  594. mcl, NULL);
  595. return TRUE;
  596. }
  597. static struct mcap_mcl *find_mcl(GSList *list, const bdaddr_t *addr)
  598. {
  599. struct mcap_mcl *mcl;
  600. for (; list; list = list->next) {
  601. mcl = list->data;
  602. if (!bacmp(&mcl->addr, addr))
  603. return mcl;
  604. }
  605. return NULL;
  606. }
  607. int mcap_mdl_get_fd(struct mcap_mdl *mdl)
  608. {
  609. if (!mdl || mdl->state != MDL_CONNECTED)
  610. return -ENOTCONN;
  611. return g_io_channel_unix_get_fd(mdl->dc);
  612. }
  613. uint16_t mcap_mdl_get_mdlid(struct mcap_mdl *mdl)
  614. {
  615. if (!mdl)
  616. return MCAP_MDLID_RESERVED;
  617. return mdl->mdlid;
  618. }
/* GFunc adapter so shutdown_mdl() can be used with g_slist_foreach() */
static void shutdown_mdl_cb(void *data, void *user_data)
{
	shutdown_mdl(data);
}
/* GFunc adapter so mcap_mdl_unref() can be used with g_slist_foreach() */
static void mdl_unref_cb(void *data, void *user_data)
{
	mcap_mdl_unref(data);
}
/*
 * Tear down an MCL's control channel and reset it to MCL_IDLE.
 *
 * @cache_requested: when TRUE and the MCL is not flagged MCAP_CTRL_FREE,
 * the MDL list (and its references) is kept so the MCL can be reused
 * from the cache; otherwise every MDL reference is dropped and the list
 * is freed.
 */
static void close_mcl(struct mcap_mcl *mcl, gboolean cache_requested)
{
	gboolean save = ((!(mcl->ctrl & MCAP_CTRL_FREE)) && cache_requested);

	/* Stop the response timer before tearing anything down */
	RELEASE_TIMER(mcl);

	if (mcl->cc) {
		g_io_channel_shutdown(mcl->cc, TRUE, NULL);
		g_io_channel_unref(mcl->cc);
		mcl->cc = NULL;
	}

	if (mcl->wid) {
		g_source_remove(mcl->wid);
		mcl->wid = 0;
	}

	if (mcl->lcmd) {
		g_free(mcl->lcmd);
		mcl->lcmd = NULL;
	}

	if (mcl->priv_data)
		free_mcl_priv_data(mcl);

	/* Close every data channel and stop CSP activity */
	g_slist_foreach(mcl->mdls, shutdown_mdl_cb, NULL);

	mcap_sync_stop(mcl);

	mcl->state = MCL_IDLE;

	/* Keep the MDL list when the MCL is being cached for reuse */
	if (save)
		return;

	g_slist_foreach(mcl->mdls, mdl_unref_cb, NULL);
	g_slist_free(mcl->mdls);
	mcl->mdls = NULL;
}
/* Close the MCL but keep its MDL list so it can live in the cache. */
static void mcap_mcl_shutdown(struct mcap_mcl *mcl)
{
	close_mcl(mcl, TRUE);
}
/* Fully close the MCL, dropping the MDL list as well (no caching). */
static void mcap_mcl_release(struct mcap_mcl *mcl)
{
	close_mcl(mcl, FALSE);
}
/*
 * Move a disconnected MCL from the active list to the cache so a later
 * reconnection can reuse it. If caching is disabled for this MCL
 * (MCAP_CTRL_NOCACHE) it is released and unreferenced instead. When the
 * cache is full (MAX_CACHED) the oldest cached entry is evicted first.
 */
static void mcap_cache_mcl(struct mcap_mcl *mcl)
{
	GSList *l;
	struct mcap_mcl *last;
	int len;

	if (mcl->ctrl & MCAP_CTRL_CACHED)
		return;

	mcl->mi->mcls = g_slist_remove(mcl->mi->mcls, mcl);

	if (mcl->ctrl & MCAP_CTRL_NOCACHE) {
		mcl->mi->cached = g_slist_remove(mcl->mi->cached, mcl);
		mcap_mcl_release(mcl);
		mcap_mcl_unref(mcl);
		return;
	}

	DBG("Caching MCL");

	len = g_slist_length(mcl->mi->cached);
	if (len == MAX_CACHED) {
		/* Remove the latest cached mcl */
		l = g_slist_last(mcl->mi->cached);
		last = l->data;
		mcl->mi->cached = g_slist_remove(mcl->mi->cached, last);
		last->ctrl &= ~MCAP_CTRL_CACHED;
		if (last->ctrl & MCAP_CTRL_CONN) {
			/*
			 * We have to release this MCL if connection is not
			 * successful
			 */
			last->ctrl |= MCAP_CTRL_FREE;
		} else {
			mcap_mcl_release(last);
			last->mi->mcl_uncached_cb(last, last->mi->user_data);
		}
		/* Drop the cache's reference on the evicted entry */
		mcap_mcl_unref(last);
	}

	mcl->mi->cached = g_slist_prepend(mcl->mi->cached, mcl);
	mcl->ctrl |= MCAP_CTRL_CACHED;
	/* Keep the MDL list (close_mcl(mcl, TRUE)) for reconnection */
	mcap_mcl_shutdown(mcl);
}
  701. static void mcap_uncache_mcl(struct mcap_mcl *mcl)
  702. {
  703. if (!(mcl->ctrl & MCAP_CTRL_CACHED))
  704. return;
  705. DBG("Got MCL from cache");
  706. mcl->mi->cached = g_slist_remove(mcl->mi->cached, mcl);
  707. mcl->mi->mcls = g_slist_prepend(mcl->mi->mcls, mcl);
  708. mcl->ctrl &= ~MCAP_CTRL_CACHED;
  709. mcl->ctrl &= ~MCAP_CTRL_FREE;
  710. }
/*
 * Public close entry point. If the MCL is marked MCAP_CTRL_FREE it is
 * released immediately. Otherwise: with an open control channel only
 * the channel is shut down here (the I/O watch's failure path performs
 * the caching or release); for an already-cached MCL being closed with
 * @cache == FALSE it is removed from the cache and destroyed.
 */
void mcap_close_mcl(struct mcap_mcl *mcl, gboolean cache)
{
	if (!mcl)
		return;

	if (mcl->ctrl & MCAP_CTRL_FREE) {
		mcap_mcl_release(mcl);
		return;
	}

	if (!cache)
		mcl->ctrl |= MCAP_CTRL_NOCACHE;

	if (mcl->cc) {
		/* Watch callback (mcl_control_cb) sees HUP and finishes up */
		g_io_channel_shutdown(mcl->cc, TRUE, NULL);
		g_io_channel_unref(mcl->cc);
		mcl->cc = NULL;
		mcl->state = MCL_IDLE;
	} else if ((mcl->ctrl & MCAP_CTRL_CACHED) &&
					(mcl->ctrl & MCAP_CTRL_NOCACHE)) {
		mcl->ctrl &= ~MCAP_CTRL_CACHED;
		mcl->mi->cached = g_slist_remove(mcl->mi->cached, mcl);
		mcap_mcl_release(mcl);
		mcap_mcl_unref(mcl);
	}
}
/* Take a reference on @mcl and return it (not thread-safe by design). */
struct mcap_mcl *mcap_mcl_ref(struct mcap_mcl *mcl)
{
	mcl->ref++;

	DBG("mcap_mcl_ref(%p): ref=%d", mcl, mcl->ref);

	return mcl;
}
/* Drop a reference on @mcl; when the count reaches zero the MCL is
 * fully released, the owning instance is unreferenced and the memory
 * freed. */
void mcap_mcl_unref(struct mcap_mcl *mcl)
{
	mcl->ref--;

	DBG("mcap_mcl_unref(%p): ref=%d", mcl, mcl->ref);

	if (mcl->ref > 0)
		return;

	/* Release resources before freeing the containing struct */
	mcap_mcl_release(mcl);
	mcap_instance_unref(mcl->mi);
	g_free(mcl->cb);
	g_free(mcl);
}
/*
 * Parse the (option, callback) varargs pairs of mcap_mcl_set_cb() into a
 * scratch struct, then merge only the supplied callbacks into @mdl_cb so
 * options that were not passed keep their previous values. The va_list
 * must be consumed strictly in argument order. Returns FALSE with @err
 * set on an unknown option.
 */
static gboolean parse_set_opts(struct mcap_mdl_cb *mdl_cb, GError **err,
						McapMclCb cb1, va_list args)
{
	McapMclCb cb = cb1;
	struct mcap_mdl_cb *c;

	/* Collect into a scratch struct first so a parse error leaves
	 * mdl_cb untouched */
	c = g_new0(struct mcap_mdl_cb, 1);

	while (cb != MCAP_MDL_CB_INVALID) {
		switch (cb) {
		case MCAP_MDL_CB_CONNECTED:
			c->mdl_connected = va_arg(args, mcap_mdl_event_cb);
			break;
		case MCAP_MDL_CB_CLOSED:
			c->mdl_closed = va_arg(args, mcap_mdl_event_cb);
			break;
		case MCAP_MDL_CB_DELETED:
			c->mdl_deleted = va_arg(args, mcap_mdl_event_cb);
			break;
		case MCAP_MDL_CB_ABORTED:
			c->mdl_aborted = va_arg(args, mcap_mdl_event_cb);
			break;
		case MCAP_MDL_CB_REMOTE_CONN_REQ:
			c->mdl_conn_req = va_arg(args,
						mcap_remote_mdl_conn_req_cb);
			break;
		case MCAP_MDL_CB_REMOTE_RECONN_REQ:
			c->mdl_reconn_req = va_arg(args,
						mcap_remote_mdl_reconn_req_cb);
			break;
		case MCAP_MDL_CB_INVALID:
		default:
			g_set_error(err, MCAP_ERROR, MCAP_ERROR_INVALID_ARGS,
						"Unknown option %d", cb);
			g_free(c);
			return FALSE;
		}
		/* Option tags are promoted to int through varargs */
		cb = va_arg(args, int);
	}

	/* Set new callbacks */
	if (c->mdl_connected)
		mdl_cb->mdl_connected = c->mdl_connected;
	if (c->mdl_closed)
		mdl_cb->mdl_closed = c->mdl_closed;
	if (c->mdl_deleted)
		mdl_cb->mdl_deleted = c->mdl_deleted;
	if (c->mdl_aborted)
		mdl_cb->mdl_aborted = c->mdl_aborted;
	if (c->mdl_conn_req)
		mdl_cb->mdl_conn_req = c->mdl_conn_req;
	if (c->mdl_reconn_req)
		mdl_cb->mdl_reconn_req = c->mdl_reconn_req;

	g_free(c);

	return TRUE;
}
  804. gboolean mcap_mcl_set_cb(struct mcap_mcl *mcl, gpointer user_data,
  805. GError **gerr, McapMclCb cb1, ...)
  806. {
  807. va_list args;
  808. gboolean ret;
  809. va_start(args, cb1);
  810. ret = parse_set_opts(mcl->cb, gerr, cb1, args);
  811. va_end(args);
  812. if (!ret)
  813. return FALSE;
  814. mcl->cb->user_data = user_data;
  815. return TRUE;
  816. }
/* Copy the remote Bluetooth address of @mcl into @addr. */
void mcap_mcl_get_addr(struct mcap_mcl *mcl, bdaddr_t *addr)
{
	bacpy(addr, &mcl->addr);
}
  821. static void mcap_del_mdl(gpointer elem, gpointer user_data)
  822. {
  823. struct mcap_mdl *mdl = elem;
  824. gboolean notify = *(gboolean *) user_data;
  825. if (notify)
  826. mdl->mcl->cb->mdl_deleted(mdl, mdl->mcl->cb->user_data);
  827. shutdown_mdl(mdl);
  828. mcap_mdl_unref(mdl);
  829. }
  830. static gboolean check_cmd_req_length(struct mcap_mcl *mcl, void *cmd,
  831. uint32_t rlen, uint32_t explen, uint8_t rspcod)
  832. {
  833. mcap_md_req *req;
  834. uint16_t mdl_id;
  835. if (rlen != explen) {
  836. if (rlen >= sizeof(mcap_md_req)) {
  837. req = cmd;
  838. mdl_id = ntohs(req->mdl);
  839. } else {
  840. /* We can't get mdlid */
  841. mdl_id = MCAP_MDLID_RESERVED;
  842. }
  843. mcap_send_cmd(mcl, rspcod, MCAP_INVALID_PARAM_VALUE, mdl_id,
  844. NULL, 0);
  845. return FALSE;
  846. }
  847. return TRUE;
  848. }
/*
 * Handle a remote MD_CREATE_MDL_REQ: validate length and mdlid/mdepid
 * ranges, consult the upper layer (mdl_conn_req callback), verify the
 * configuration negotiation, then create or reuse the MDL and move it
 * to MDL_WAITING so the incoming data channel can be accepted.
 */
static void process_md_create_mdl_req(struct mcap_mcl *mcl, void *cmd,
								uint32_t len)
{
	mcap_md_create_mdl_req *req;
	struct mcap_mdl *mdl;
	uint16_t mdl_id;
	uint8_t mdep_id;
	uint8_t cfga, conf;
	uint8_t rsp;

	if (!check_cmd_req_length(mcl, cmd, len, sizeof(mcap_md_create_mdl_req),
							MCAP_MD_CREATE_MDL_RSP))
		return;

	req = cmd;
	mdl_id = ntohs(req->mdl);
	if (mdl_id < MCAP_MDLID_INITIAL || mdl_id > MCAP_MDLID_FINAL) {
		mcap_send_cmd(mcl, MCAP_MD_CREATE_MDL_RSP, MCAP_INVALID_MDL,
							mdl_id, NULL, 0);
		return;
	}

	mdep_id = req->mdep;
	if (mdep_id > MCAP_MDEPID_FINAL) {
		mcap_send_cmd(mcl, MCAP_MD_CREATE_MDL_RSP, MCAP_INVALID_MDEP,
							mdl_id, NULL, 0);
		return;
	}

	mdl = get_mdl(mcl, mdl_id);
	if (mdl && (mdl->state == MDL_WAITING || mdl->state == MDL_DELETING )) {
		/*
		 * Creation request arrives for a MDL that is being managed
		 * at current moment
		 */
		mcap_send_cmd(mcl, MCAP_MD_CREATE_MDL_RSP, MCAP_MDL_BUSY,
							mdl_id, NULL, 0);
		return;
	}

	/* cfga keeps the remote's requested config; conf may be rewritten */
	cfga = conf = req->conf;
	/* Callback to upper layer */
	rsp = mcl->cb->mdl_conn_req(mcl, mdep_id, mdl_id, &conf,
							mcl->cb->user_data);
	if (mcl->state == MCL_IDLE) {
		/* MCL has been closed in the callback */
		return;
	}

	if (cfga != 0 && cfga != conf) {
		/*
		 * Remote device set default configuration but upper profile
		 * has changed it. Protocol Error: force closing the MCL by
		 * remote device using UNSPECIFIED_ERROR response
		 */
		mcap_send_cmd(mcl, MCAP_MD_CREATE_MDL_RSP,
				MCAP_UNSPECIFIED_ERROR, mdl_id, NULL, 0);
		return;
	}

	if (rsp != MCAP_SUCCESS) {
		/* Upper layer rejected the connection; relay its code */
		mcap_send_cmd(mcl, MCAP_MD_CREATE_MDL_RSP, rsp, mdl_id,
								NULL, 0);
		return;
	}

	if (!mdl) {
		/* New MDL: one ref for the list, one implicit for 'mdl' */
		mdl = g_new0(struct mcap_mdl, 1);
		mdl->mcl = mcap_mcl_ref(mcl);
		mdl->mdlid = mdl_id;
		mcl->mdls = g_slist_insert_sorted(mcl->mdls, mcap_mdl_ref(mdl),
								compare_mdl);
	} else if (mdl->state == MDL_CONNECTED) {
		/*
		 * MCAP specification says that we should close the MCL if
		 * it is open when we receive a MD_CREATE_MDL_REQ
		 */
		shutdown_mdl(mdl);
	}

	mdl->mdep_id = mdep_id;
	mdl->state = MDL_WAITING;

	mcl->state = MCL_PENDING;
	mcap_send_cmd(mcl, MCAP_MD_CREATE_MDL_RSP, MCAP_SUCCESS, mdl_id,
								&conf, 1);
}
/*
 * Handle a remote MD_RECONNECT_MDL_REQ: the mdlid must refer to a known
 * MDL that is not currently being created/deleted; the upper layer
 * (mdl_reconn_req callback) decides acceptance. On success the MDL is
 * moved back to MDL_WAITING so the data channel can be re-established.
 */
static void process_md_reconnect_mdl_req(struct mcap_mcl *mcl, void *cmd,
								uint32_t len)
{
	mcap_md_req *req;
	struct mcap_mdl *mdl;
	uint16_t mdl_id;
	uint8_t rsp;

	if (!check_cmd_req_length(mcl, cmd, len, sizeof(mcap_md_req),
						MCAP_MD_RECONNECT_MDL_RSP))
		return;

	req = cmd;
	mdl_id = ntohs(req->mdl);

	mdl = get_mdl(mcl, mdl_id);
	if (!mdl) {
		mcap_send_cmd(mcl, MCAP_MD_RECONNECT_MDL_RSP, MCAP_INVALID_MDL,
							mdl_id, NULL, 0);
		return;
	} else if (mdl->state == MDL_WAITING || mdl->state == MDL_DELETING ) {
		/*
		 * Creation request arrives for a MDL that is being managed
		 * at current moment
		 */
		mcap_send_cmd(mcl, MCAP_MD_RECONNECT_MDL_RSP, MCAP_MDL_BUSY,
							mdl_id, NULL, 0);
		return;
	}

	/* Callback to upper layer */
	rsp = mcl->cb->mdl_reconn_req(mdl, mcl->cb->user_data);
	if (mcl->state == MCL_IDLE)
		/* MCL was closed inside the callback */
		return;

	if (rsp != MCAP_SUCCESS) {
		mcap_send_cmd(mcl, MCAP_MD_RECONNECT_MDL_RSP, rsp, mdl_id,
								NULL, 0);
		return;
	}

	if (mdl->state == MDL_CONNECTED)
		/* Spec: drop the stale data channel before reconnection */
		shutdown_mdl(mdl);

	mdl->state = MDL_WAITING;
	mcl->state = MCL_PENDING;

	mcap_send_cmd(mcl, MCAP_MD_RECONNECT_MDL_RSP, MCAP_SUCCESS, mdl_id,
								NULL, 0);
}
/*
 * Handle a remote MD_ABORT_MDL_REQ: locate the matching MDL in
 * MDL_WAITING state and abort it. While scanning, the MCL state is
 * recomputed (MCL_ACTIVE if any other MDL is still connected,
 * MCL_CONNECTED otherwise), so the whole list may need visiting even
 * after the target is found.
 */
static void process_md_abort_mdl_req(struct mcap_mcl *mcl, void *cmd,
								uint32_t len)
{
	mcap_md_req *req;
	GSList *l;
	struct mcap_mdl *mdl, *abrt;
	uint16_t mdl_id;

	if (!check_cmd_req_length(mcl, cmd, len, sizeof(mcap_md_req),
							MCAP_MD_ABORT_MDL_RSP))
		return;

	req = cmd;
	mdl_id = ntohs(req->mdl);
	/* Start from CONNECTED; promoted to ACTIVE if a live MDL is seen */
	mcl->state = MCL_CONNECTED;
	abrt = NULL;
	for (l = mcl->mdls; l; l = l->next) {
		mdl = l->data;
		if (mdl_id == mdl->mdlid && mdl->state == MDL_WAITING) {
			abrt = mdl;
			if (mcl->state != MCL_CONNECTED)
				break;
			continue;
		}
		if (mdl->state == MDL_CONNECTED && mcl->state != MCL_ACTIVE)
			mcl->state = MCL_ACTIVE;

		/* Stop once both the target and the final state are known */
		if (abrt && mcl->state == MCL_ACTIVE)
			break;
	}

	if (!abrt) {
		mcap_send_cmd(mcl, MCAP_MD_ABORT_MDL_RSP, MCAP_INVALID_MDL,
							mdl_id, NULL, 0);
		return;
	}

	mcl->cb->mdl_aborted(abrt, mcl->cb->user_data);
	abrt->state = MDL_CLOSED;
	mcap_send_cmd(mcl, MCAP_MD_ABORT_MDL_RSP, MCAP_SUCCESS, mdl_id,
								NULL, 0);
}
/*
 * Handle a remote MD_DELETE_MDL_REQ. MCAP_ALL_MDLIDS deletes every MDL
 * (with upper-layer notification) and resets the MCL to MCL_CONNECTED;
 * otherwise the single matching MDL is removed, unless it is unknown or
 * still in MDL_WAITING, which yields an INVALID_MDL response.
 */
static void process_md_delete_mdl_req(struct mcap_mcl *mcl, void *cmd,
								uint32_t len)
{
	mcap_md_req *req;
	struct mcap_mdl *mdl, *aux;
	uint16_t mdlid;
	gboolean notify;
	GSList *l;

	if (!check_cmd_req_length(mcl, cmd, len, sizeof(mcap_md_req),
							MCAP_MD_DELETE_MDL_RSP))
		return;

	req = cmd;
	mdlid = ntohs(req->mdl);
	if (mdlid == MCAP_ALL_MDLIDS) {
		notify = TRUE;
		g_slist_foreach(mcl->mdls, mcap_del_mdl, &notify);
		g_slist_free(mcl->mdls);
		mcl->mdls = NULL;
		mcl->state = MCL_CONNECTED;
		goto resp;
	}

	if (mdlid < MCAP_MDLID_INITIAL || mdlid > MCAP_MDLID_FINAL) {
		mcap_send_cmd(mcl, MCAP_MD_DELETE_MDL_RSP, MCAP_INVALID_MDL,
							mdlid, NULL, 0);
		return;
	}

	/* Linear search for the target MDL */
	for (l = mcl->mdls, mdl = NULL; l; l = l->next) {
		aux = l->data;
		if (aux->mdlid == mdlid) {
			mdl = aux;
			break;
		}
	}

	if (!mdl || mdl->state == MDL_WAITING) {
		mcap_send_cmd(mcl, MCAP_MD_DELETE_MDL_RSP, MCAP_INVALID_MDL,
							mdlid, NULL, 0);
		return;
	}

	mcl->mdls = g_slist_remove(mcl->mdls, mdl);
	update_mcl_state(mcl);
	notify = TRUE;
	mcap_del_mdl(mdl, &notify);

resp:
	mcap_send_cmd(mcl, MCAP_MD_DELETE_MDL_RSP, MCAP_SUCCESS, mdlid,
								NULL, 0);
}
  1051. static void invalid_req_state(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
  1052. {
  1053. uint16_t mdlr;
  1054. error("Invalid cmd received (op code = %d) in state %d", cmd[0],
  1055. mcl->state);
  1056. /*
  1057. * Get previously mdlid sent to generate an appropriate
  1058. * response if it is possible
  1059. */
  1060. mdlr = len < sizeof(mcap_md_req) ? MCAP_MDLID_RESERVED :
  1061. ntohs(((mcap_md_req *) cmd)->mdl);
  1062. mcap_send_cmd(mcl, cmd[0]+1, MCAP_INVALID_OPERATION, mdlr, NULL, 0);
  1063. }
/* Functions used to process incoming requests depending on the MCL state */
  1065. static void proc_req_connected(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
  1066. {
  1067. switch (cmd[0]) {
  1068. case MCAP_MD_CREATE_MDL_REQ:
  1069. process_md_create_mdl_req(mcl, cmd, len);
  1070. break;
  1071. case MCAP_MD_RECONNECT_MDL_REQ:
  1072. process_md_reconnect_mdl_req(mcl, cmd, len);
  1073. break;
  1074. case MCAP_MD_DELETE_MDL_REQ:
  1075. process_md_delete_mdl_req(mcl, cmd, len);
  1076. break;
  1077. default:
  1078. invalid_req_state(mcl, cmd, len);
  1079. }
  1080. }
  1081. static void proc_req_pending(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
  1082. {
  1083. if (cmd[0] == MCAP_MD_ABORT_MDL_REQ)
  1084. process_md_abort_mdl_req(mcl, cmd, len);
  1085. else
  1086. invalid_req_state(mcl, cmd, len);
  1087. }
  1088. static void proc_req_active(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
  1089. {
  1090. switch (cmd[0]) {
  1091. case MCAP_MD_CREATE_MDL_REQ:
  1092. process_md_create_mdl_req(mcl, cmd, len);
  1093. break;
  1094. case MCAP_MD_RECONNECT_MDL_REQ:
  1095. process_md_reconnect_mdl_req(mcl, cmd, len);
  1096. break;
  1097. case MCAP_MD_DELETE_MDL_REQ:
  1098. process_md_delete_mdl_req(mcl, cmd, len);
  1099. break;
  1100. default:
  1101. invalid_req_state(mcl, cmd, len);
  1102. }
  1103. }
  1104. /* Function used to process replies */
/*
 * Classify a received response against the last request (mcl->lcmd).
 * Sets @gerr on any protocol or remote error and returns TRUE only when
 * the error is severe enough that the MCL should be closed. A clean
 * MCAP_SUCCESS response returns FALSE with @gerr unset. The check order
 * matters: frame validity first, then mdlid match, then result code.
 */
static gboolean check_err_rsp(struct mcap_mcl *mcl, mcap_rsp *rsp,
				uint32_t rlen, uint32_t len, GError **gerr)
{
	mcap_md_req *cmdlast = (mcap_md_req *) mcl->lcmd;
	int err = MCAP_ERROR_FAILED;
	gboolean close = FALSE;
	char *msg;

	if (rsp->op == MCAP_ERROR_RSP) {
		msg = "MCAP_ERROR_RSP received";
		close = FALSE;
		goto fail;
	}

	/* Check if the response matches with the last request */
	if (rlen < sizeof(mcap_rsp) || (mcl->lcmd[0] + 1) != rsp->op) {
		msg = "Protocol error";
		close = FALSE;
		goto fail;
	}

	/* Response shorter than the handler expects (@len) */
	if (rlen < len) {
		msg = "Protocol error";
		close = FALSE;
		goto fail;
	}

	if (rsp->mdl != cmdlast->mdl) {
		msg = "MDLID received doesn't match with MDLID sent";
		close = TRUE;
		goto fail;
	}

	if (rsp->rc == MCAP_REQUEST_NOT_SUPPORTED) {
		msg = "Remote does not support opcodes";
		/* Remember: stop sending standard opcodes to this peer */
		mcl->ctrl &= ~MCAP_CTRL_STD_OP;
		goto fail;
	}

	if (rsp->rc == MCAP_UNSPECIFIED_ERROR) {
		msg = "Unspecified error";
		close = TRUE;
		goto fail;
	}

	if (rsp->rc != MCAP_SUCCESS) {
		msg = error2str(rsp->rc);
		err = rsp->rc;
		goto fail;
	}

	return FALSE;

fail:
	g_set_error(gerr, MCAP_ERROR, err, "%s", msg);
	return close;
}
/*
 * Process the response to our MD_CREATE_MDL_REQ. On success the upper
 * layer gets the negotiated configuration (rsp->data[0]); on error the
 * MDL is removed from the list and unreferenced. Returns TRUE when the
 * MCL must be closed (per check_err_rsp or a config mismatch).
 */
static gboolean process_md_create_mdl_rsp(struct mcap_mcl *mcl,
						mcap_rsp *rsp, uint32_t len)
{
	mcap_md_create_mdl_req *cmdlast = (mcap_md_create_mdl_req *) mcl->lcmd;
	struct mcap_mdl_op_cb *conn = mcl->priv_data;
	mcap_mdl_operation_conf_cb connect_cb = conn->cb.op_conf;
	gpointer user_data = conn->user_data;
	struct mcap_mdl *mdl = conn->mdl;
	uint8_t conf = cmdlast->conf;
	gboolean close;
	GError *gerr = NULL;

	/* Expected length: header plus one configuration byte */
	close = check_err_rsp(mcl, rsp, len, sizeof(mcap_rsp) + 1, &gerr);

	/* The last command is consumed regardless of the outcome */
	g_free(mcl->lcmd);
	mcl->lcmd = NULL;
	mcl->req = MCL_AVAILABLE;

	if (gerr)
		goto fail;

	/* Check if preferences changed */
	if (conf != 0x00 && rsp->data[0] != conf) {
		g_set_error(&gerr, MCAP_ERROR, MCAP_ERROR_FAILED,
						"Configuration changed");
		close = TRUE;
		goto fail;
	}

	connect_cb(mdl, rsp->data[0], gerr, user_data);
	return close;

fail:
	connect_cb(NULL, 0, gerr, user_data);
	mcl->mdls = g_slist_remove(mcl->mdls, mdl);
	mcap_mdl_unref(mdl);
	g_error_free(gerr);
	update_mcl_state(mcl);
	return close;
}
/*
 * Process the response to our MD_RECONNECT_MDL_REQ. The callback always
 * runs; on error the MDL's data channel is shut down, and when the
 * remote answered INVALID_MDL the cached MDL entry is dropped entirely
 * (with an mdl_deleted notification). Returns the close verdict.
 */
static gboolean process_md_reconnect_mdl_rsp(struct mcap_mcl *mcl,
						mcap_rsp *rsp, uint32_t len)
{
	struct mcap_mdl_op_cb *reconn = mcl->priv_data;
	mcap_mdl_operation_cb reconn_cb = reconn->cb.op;
	gpointer user_data = reconn->user_data;
	struct mcap_mdl *mdl = reconn->mdl;
	GError *gerr = NULL;
	gboolean close;

	close = check_err_rsp(mcl, rsp, len, sizeof(mcap_rsp), &gerr);

	/* The last command is consumed regardless of the outcome */
	g_free(mcl->lcmd);
	mcl->lcmd = NULL;
	mcl->req = MCL_AVAILABLE;

	reconn_cb(mdl, gerr, user_data);

	if (!gerr)
		return close;

	g_error_free(gerr);
	shutdown_mdl(mdl);
	update_mcl_state(mcl);

	if (rsp->rc != MCAP_INVALID_MDL)
		return close;

	/* Remove cached mdlid */
	mcl->mdls = g_slist_remove(mcl->mdls, mdl);
	mcl->cb->mdl_deleted(mdl, mcl->cb->user_data);
	mcap_mdl_unref(mdl);

	return close;
}
/*
 * Process the response to our MD_ABORT_MDL_REQ. The abort callback runs
 * unconditionally and the MDL's data channel is shut down; an
 * INVALID_MDL result additionally removes the MDL from our list (the
 * remote no longer knows it). Returns the close verdict.
 */
static gboolean process_md_abort_mdl_rsp(struct mcap_mcl *mcl,
						mcap_rsp *rsp, uint32_t len)
{
	struct mcap_mdl_op_cb *abrt = mcl->priv_data;
	mcap_mdl_notify_cb abrt_cb = abrt->cb.notify;
	gpointer user_data = abrt->user_data;
	struct mcap_mdl *mdl = abrt->mdl;
	GError *gerr = NULL;
	gboolean close;

	close = check_err_rsp(mcl, rsp, len, sizeof(mcap_rsp), &gerr);

	/* The last command is consumed regardless of the outcome */
	g_free(mcl->lcmd);
	mcl->lcmd = NULL;
	mcl->req = MCL_AVAILABLE;

	abrt_cb(gerr, user_data);
	shutdown_mdl(mdl);

	if (len >= sizeof(mcap_rsp) && rsp->rc == MCAP_INVALID_MDL) {
		mcl->mdls = g_slist_remove(mcl->mdls, mdl);
		mcl->cb->mdl_deleted(mdl, mcl->cb->user_data);
		mcap_mdl_unref(mdl);
	}

	if (gerr)
		g_error_free(gerr);

	update_mcl_state(mcl);

	return close;
}
  1239. static void restore_mdl(gpointer elem, gpointer data)
  1240. {
  1241. struct mcap_mdl *mdl = elem;
  1242. if (mdl->state == MDL_DELETING) {
  1243. if (mdl->dc)
  1244. mdl->state = MDL_CONNECTED;
  1245. else
  1246. mdl->state = MDL_CLOSED;
  1247. } else if (mdl->state == MDL_CLOSED)
  1248. mdl->mcl->cb->mdl_closed(mdl, mdl->mcl->cb->user_data);
  1249. }
  1250. static void check_mdl_del_err(struct mcap_mdl *mdl, mcap_rsp *rsp)
  1251. {
  1252. if (rsp->rc != MCAP_ERROR_INVALID_MDL) {
  1253. restore_mdl(mdl, NULL);
  1254. return;
  1255. }
  1256. /* MDL does not exist in remote side, we can delete it */
  1257. mdl->mcl->mdls = g_slist_remove(mdl->mcl->mdls, mdl);
  1258. mcap_mdl_unref(mdl);
  1259. }
/*
 * Process the response to our MD_DELETE_MDL_REQ. On error the deletion
 * is rolled back (single MDL or the whole list). On success either the
 * entire list is torn down (MCAP_ALL_MDLIDS request) or the single MDL
 * is removed; notify is FALSE because the deletion was locally
 * initiated. Returns the close verdict.
 */
static gboolean process_md_delete_mdl_rsp(struct mcap_mcl *mcl, mcap_rsp *rsp,
								uint32_t len)
{
	struct mcap_mdl_op_cb *del = mcl->priv_data;
	struct mcap_mdl *mdl = del->mdl;
	mcap_mdl_notify_cb deleted_cb = del->cb.notify;
	gpointer user_data = del->user_data;
	mcap_md_req *cmdlast = (mcap_md_req *) mcl->lcmd;
	uint16_t mdlid = ntohs(cmdlast->mdl);
	GError *gerr = NULL;
	gboolean close;
	gboolean notify = FALSE;

	close = check_err_rsp(mcl, rsp, len, sizeof(mcap_rsp), &gerr);

	/* The last command is consumed regardless of the outcome */
	g_free(mcl->lcmd);
	mcl->lcmd = NULL;
	mcl->req = MCL_AVAILABLE;

	if (gerr) {
		/* NULL mdl means the request targeted MCAP_ALL_MDLIDS */
		if (mdl)
			check_mdl_del_err(mdl, rsp);
		else
			g_slist_foreach(mcl->mdls, restore_mdl, NULL);
		deleted_cb(gerr, user_data);
		g_error_free(gerr);
		return close;
	}

	if (mdlid == MCAP_ALL_MDLIDS) {
		g_slist_foreach(mcl->mdls, mcap_del_mdl, &notify);
		g_slist_free(mcl->mdls);
		mcl->mdls = NULL;
		mcl->state = MCL_CONNECTED;
	} else {
		mcl->mdls = g_slist_remove(mcl->mdls, mdl);
		update_mcl_state(mcl);
		mcap_del_mdl(mdl, &notify);
	}

	deleted_cb(gerr, user_data);

	return close;
}
  1298. static void post_process_rsp(struct mcap_mcl *mcl, struct mcap_mdl_op_cb *op)
  1299. {
  1300. if (mcl->priv_data != op) {
  1301. /*
  1302. * Queued MCAP request in some callback.
  1303. * We should not delete the mcl private data
  1304. */
  1305. free_mcap_mdl_op(op);
  1306. } else {
  1307. /*
  1308. * This is not a queued request. It's safe
  1309. * delete the mcl private data here.
  1310. */
  1311. free_mcl_priv_data(mcl);
  1312. }
  1313. }
/*
 * Dispatch a received response to the handler matching the outstanding
 * request (lcmd opcode + 1). Cancels the response timer first. When any
 * handler asks for it (or the opcode is unknown), the MCL is reported
 * disconnected and moved to the cache.
 */
static void proc_response(struct mcap_mcl *mcl, void *buf, uint32_t len)
{
	struct mcap_mdl_op_cb *op = mcl->priv_data;
	mcap_rsp *rsp = buf;
	gboolean close;

	RELEASE_TIMER(mcl);

	switch (mcl->lcmd[0] + 1) {
	case MCAP_MD_CREATE_MDL_RSP:
		close = process_md_create_mdl_rsp(mcl, rsp, len);
		post_process_rsp(mcl, op);
		break;
	case MCAP_MD_RECONNECT_MDL_RSP:
		close = process_md_reconnect_mdl_rsp(mcl, rsp, len);
		post_process_rsp(mcl, op);
		break;
	case MCAP_MD_ABORT_MDL_RSP:
		close = process_md_abort_mdl_rsp(mcl, rsp, len);
		post_process_rsp(mcl, op);
		break;
	case MCAP_MD_DELETE_MDL_RSP:
		close = process_md_delete_mdl_rsp(mcl, rsp, len);
		post_process_rsp(mcl, op);
		break;
	default:
		DBG("Unknown cmd response received (op code = %d)", rsp->op);
		close = TRUE;
		break;
	}

	if (close) {
		mcl->mi->mcl_disconnected_cb(mcl, mcl->mi->user_data);
		mcap_cache_mcl(mcl);
	}
}
/*
 * Entry point for every PDU received on the control channel. Validates
 * the opcode range, routes CSP (clock sync) opcodes to proc_sync_cmd(),
 * and otherwise dispatches requests/responses. Odd opcodes (bit 0 set)
 * are requests; even ones are responses. Per MCAP, when both sides have
 * an outstanding request the Initiator's request wins, so an Acceptor
 * abandons its own pending request in that collision.
 */
static void proc_cmd(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
{
	GError *gerr = NULL;

	/* Valid opcodes are the standard set plus the contiguous CSP set */
	if (cmd[0] > MCAP_MD_SYNC_INFO_IND ||
					(cmd[0] > MCAP_MD_DELETE_MDL_RSP &&
					cmd[0] < MCAP_MD_SYNC_CAP_REQ)) {
		error("Unknown cmd received (op code = %d)", cmd[0]);
		mcap_send_cmd(mcl, MCAP_ERROR_RSP, MCAP_INVALID_OP_CODE,
						MCAP_MDLID_RESERVED, NULL, 0);
		return;
	}

	if (cmd[0] >= MCAP_MD_SYNC_CAP_REQ &&
					cmd[0] <= MCAP_MD_SYNC_INFO_IND) {
		proc_sync_cmd(mcl, cmd, len);
		return;
	}

	if (!(mcl->ctrl & MCAP_CTRL_STD_OP)) {
		/* In case the remote device doesn't work correctly */
		error("Remote device does not support opcodes, cmd ignored");
		return;
	}

	if (mcl->req == MCL_WAITING_RSP) {
		if (cmd[0] & 0x01) {
			/* Request arrived when a response is expected */
			if (mcl->role == MCL_INITIATOR)
				/* ignore */
				return;

			/* Initiator will ignore our last request */
			RELEASE_TIMER(mcl);
			mcl->req = MCL_AVAILABLE;
			g_set_error(&gerr, MCAP_ERROR, MCAP_ERROR_REQ_IGNORED,
				"Initiator sent a request with more priority");
			mcap_notify_error(mcl, gerr);
			proc_req[mcl->state](mcl, cmd, len);
			return;
		}
		proc_response(mcl, cmd, len);
	} else if (cmd[0] & 0x01)
		proc_req[mcl->state](mcl, cmd, len);
}
  1387. static gboolean mdl_event_cb(GIOChannel *chan, GIOCondition cond, gpointer data)
  1388. {
  1389. struct mcap_mdl *mdl = data;
  1390. gboolean notify;
  1391. DBG("Close MDL %d", mdl->mdlid);
  1392. notify = (mdl->state == MDL_CONNECTED);
  1393. shutdown_mdl(mdl);
  1394. update_mcl_state(mdl->mcl);
  1395. if (notify) {
  1396. /*Callback to upper layer */
  1397. mdl->mcl->cb->mdl_closed(mdl, mdl->mcl->cb->user_data);
  1398. }
  1399. return FALSE;
  1400. }
  1401. static void mcap_connect_mdl_cb(GIOChannel *chan, GError *conn_err,
  1402. gpointer data)
  1403. {
  1404. struct mcap_mdl_op_cb *con = data;
  1405. struct mcap_mdl *mdl = con->mdl;
  1406. mcap_mdl_operation_cb cb = con->cb.op;
  1407. gpointer user_data = con->user_data;
  1408. DBG("mdl connect callback");
  1409. if (conn_err) {
  1410. DBG("ERROR: mdl connect callback");
  1411. mdl->state = MDL_CLOSED;
  1412. g_io_channel_unref(mdl->dc);
  1413. mdl->dc = NULL;
  1414. cb(mdl, conn_err, user_data);
  1415. return;
  1416. }
  1417. mdl->state = MDL_CONNECTED;
  1418. mdl->wid = g_io_add_watch_full(mdl->dc, G_PRIORITY_DEFAULT,
  1419. G_IO_ERR | G_IO_HUP | G_IO_NVAL,
  1420. (GIOFunc) mdl_event_cb,
  1421. mcap_mdl_ref(mdl),
  1422. (GDestroyNotify) mcap_mdl_unref);
  1423. cb(mdl, conn_err, user_data);
  1424. }
/*
 * Open the L2CAP data channel for an MDL previously accepted by the
 * remote (state MDL_WAITING). @mode must be ERTM or streaming. The
 * operation context is owned by the bt_io machinery: free_mcap_mdl_op
 * runs as its destroy notify, so it is freed manually only when
 * bt_io_connect() itself fails. Returns FALSE with @err set on error.
 */
gboolean mcap_connect_mdl(struct mcap_mdl *mdl, uint8_t mode,
					uint16_t dcpsm,
					mcap_mdl_operation_cb connect_cb,
					gpointer user_data,
					GDestroyNotify destroy,
					GError **err)
{
	struct mcap_mdl_op_cb *con;

	if (mdl->state != MDL_WAITING) {
		g_set_error(err, MCAP_ERROR, MCAP_ERROR_INVALID_MDL,
					"%s", error2str(MCAP_INVALID_MDL));
		return FALSE;
	}

	if ((mode != BT_IO_MODE_ERTM) && (mode != BT_IO_MODE_STREAMING)) {
		g_set_error(err, MCAP_ERROR, MCAP_ERROR_INVALID_ARGS,
						"Invalid MDL configuration");
		return FALSE;
	}

	con = g_new0(struct mcap_mdl_op_cb, 1);
	con->mdl = mcap_mdl_ref(mdl);
	con->cb.op = connect_cb;
	con->destroy = destroy;
	con->user_data = user_data;

	mdl->dc = bt_io_connect(mcap_connect_mdl_cb, con,
				(GDestroyNotify) free_mcap_mdl_op, err,
				BT_IO_OPT_SOURCE_BDADDR, &mdl->mcl->mi->src,
				BT_IO_OPT_DEST_BDADDR, &mdl->mcl->addr,
				BT_IO_OPT_PSM, dcpsm,
				BT_IO_OPT_MTU, MCAP_DC_MTU,
				BT_IO_OPT_SEC_LEVEL, mdl->mcl->mi->sec,
				BT_IO_OPT_MODE, mode,
				BT_IO_OPT_INVALID);
	if (!mdl->dc) {
		DBG("MDL Connection error");
		mdl->state = MDL_CLOSED;
		/* destroy notify did not run; release the context here */
		mcap_mdl_unref(con->mdl);
		g_free(con);
		return FALSE;
	}

	return TRUE;
}
  1466. static gboolean mcl_control_cb(GIOChannel *chan, GIOCondition cond,
  1467. gpointer data)
  1468. {
  1469. GError *gerr = NULL;
  1470. struct mcap_mcl *mcl = data;
  1471. int sk, len;
  1472. uint8_t buf[MCAP_CC_MTU];
  1473. if (cond & (G_IO_ERR | G_IO_HUP | G_IO_NVAL))
  1474. goto fail;
  1475. sk = g_io_channel_unix_get_fd(chan);
  1476. len = read(sk, buf, sizeof(buf));
  1477. if (len < 0)
  1478. goto fail;
  1479. proc_cmd(mcl, buf, (uint32_t) len);
  1480. return TRUE;
  1481. fail:
  1482. if (mcl->state != MCL_IDLE) {
  1483. if (mcl->req == MCL_WAITING_RSP) {
  1484. /* notify error in pending callback */
  1485. g_set_error(&gerr, MCAP_ERROR, MCAP_ERROR_MCL_CLOSED,
  1486. "MCL closed");
  1487. mcap_notify_error(mcl, gerr);
  1488. g_error_free(gerr);
  1489. }
  1490. mcl->mi->mcl_disconnected_cb(mcl, mcl->mi->user_data);
  1491. }
  1492. mcap_cache_mcl(mcl);
  1493. return FALSE;
  1494. }
/*
 * bt_io_connect completion for an outgoing MCL control channel. On
 * failure a free-marked MCL is released and reported uncached. On
 * success: reject a concurrent duplicate connection to the same
 * address, otherwise initialize the MCL (connected, initiator,
 * standard opcodes, clock sync), pull it out of the cache or insert it
 * into the active list, and install the control-channel watch.
 */
static void mcap_connect_mcl_cb(GIOChannel *chan, GError *conn_err,
							gpointer user_data)
{
	char dstaddr[18];
	struct connect_mcl *con = user_data;
	struct mcap_mcl *aux, *mcl = con->mcl;
	mcap_mcl_connect_cb connect_cb = con->connect_cb;
	gpointer data = con->user_data;
	GError *gerr = NULL;

	mcl->ctrl &= ~MCAP_CTRL_CONN;

	if (conn_err) {
		if (mcl->ctrl & MCAP_CTRL_FREE) {
			mcap_mcl_release(mcl);
			mcl->mi->mcl_uncached_cb(mcl, mcl->mi->user_data);
		}
		connect_cb(NULL, conn_err, data);
		return;
	}

	ba2str(&mcl->addr, dstaddr);

	aux = find_mcl(mcl->mi->mcls, &mcl->addr);
	if (aux) {
		/* Double MCL connection case */
		error("MCL error: Device %s is already connected", dstaddr);
		g_set_error(&gerr, MCAP_ERROR, MCAP_ERROR_ALREADY_EXISTS,
					"MCL %s is already connected", dstaddr);
		connect_cb(NULL, gerr, data);
		g_error_free(gerr);
		return;
	}

	mcl->state = MCL_CONNECTED;
	mcl->role = MCL_INITIATOR;
	mcl->req = MCL_AVAILABLE;
	mcl->ctrl |= MCAP_CTRL_STD_OP;

	mcap_sync_init(mcl);

	if (mcl->ctrl & MCAP_CTRL_CACHED)
		mcap_uncache_mcl(mcl);
	else {
		mcl->ctrl &= ~MCAP_CTRL_FREE;
		mcl->mi->mcls = g_slist_prepend(mcl->mi->mcls,
							mcap_mcl_ref(mcl));
	}

	mcl->wid = g_io_add_watch_full(mcl->cc, G_PRIORITY_DEFAULT,
				G_IO_IN | G_IO_ERR | G_IO_HUP | G_IO_NVAL,
				(GIOFunc) mcl_control_cb,
				mcap_mcl_ref(mcl),
				(GDestroyNotify) mcap_mcl_unref);
	connect_cb(mcl, gerr, data);
}
/* Adopt an accepted incoming data channel for @mdl: mark it connected,
 * keep a reference on the channel, install the error-only watch,
 * promote the MCL to MCL_ACTIVE and notify the upper layer. */
static void set_mdl_properties(GIOChannel *chan, struct mcap_mdl *mdl)
{
	struct mcap_mcl *mcl = mdl->mcl;

	mdl->state = MDL_CONNECTED;
	mdl->dc = g_io_channel_ref(chan);
	mdl->wid = g_io_add_watch_full(mdl->dc, G_PRIORITY_DEFAULT,
					G_IO_ERR | G_IO_HUP | G_IO_NVAL,
					(GIOFunc) mdl_event_cb,
					mcap_mdl_ref(mdl),
					(GDestroyNotify) mcap_mdl_unref);

	mcl->state = MCL_ACTIVE;
	mcl->cb->mdl_connected(mdl, mcl->cb->user_data);
}
  1556. static void mcl_io_destroy(gpointer data)
  1557. {
  1558. struct connect_mcl *con = data;
  1559. mcap_mcl_unref(con->mcl);
  1560. if (con->destroy)
  1561. con->destroy(con->user_data);
  1562. g_free(con);
  1563. }
/*
 * Create (or reuse from cache) an MCL towards @addr and start the
 * asynchronous control-channel connection; mcap_connect_mcl_cb()
 * completes the setup. Fails immediately when an active MCL to the
 * address already exists. The connect context owns one MCL reference,
 * released by mcl_io_destroy() (or manually when bt_io_connect() fails).
 */
gboolean mcap_create_mcl(struct mcap_instance *mi,
				const bdaddr_t *addr,
				uint16_t ccpsm,
				mcap_mcl_connect_cb connect_cb,
				gpointer user_data,
				GDestroyNotify destroy,
				GError **err)
{
	struct mcap_mcl *mcl;
	struct connect_mcl *con;

	mcl = find_mcl(mi->mcls, addr);
	if (mcl) {
		g_set_error(err, MCAP_ERROR, MCAP_ERROR_ALREADY_EXISTS,
					"MCL is already connected.");
		return FALSE;
	}

	/* Prefer a cached MCL for this address; otherwise build a new one */
	mcl = find_mcl(mi->cached, addr);
	if (!mcl) {
		mcl = g_new0(struct mcap_mcl, 1);
		mcl->mi = mcap_instance_ref(mi);
		mcl->state = MCL_IDLE;
		bacpy(&mcl->addr, addr);
		set_default_cb(mcl);
		/* Random start avoids predictable mdlid allocation */
		mcl->next_mdl = (rand() % MCAP_MDLID_FINAL) + 1;
	}

	mcl->ctrl |= MCAP_CTRL_CONN;

	con = g_new0(struct connect_mcl, 1);
	con->mcl = mcap_mcl_ref(mcl);
	con->connect_cb = connect_cb;
	con->destroy = destroy;
	con->user_data = user_data;

	mcl->cc = bt_io_connect(mcap_connect_mcl_cb, con,
				mcl_io_destroy, err,
				BT_IO_OPT_SOURCE_BDADDR, &mi->src,
				BT_IO_OPT_DEST_BDADDR, addr,
				BT_IO_OPT_PSM, ccpsm,
				BT_IO_OPT_MTU, MCAP_CC_MTU,
				BT_IO_OPT_SEC_LEVEL, mi->sec,
				BT_IO_OPT_MODE, BT_IO_MODE_ERTM,
				BT_IO_OPT_INVALID);
	if (!mcl->cc) {
		mcl->ctrl &= ~MCAP_CTRL_CONN;
		if (mcl->ctrl & MCAP_CTRL_FREE) {
			mcap_mcl_release(mcl);
			mcl->mi->mcl_uncached_cb(mcl, mcl->mi->user_data);
		}
		/* mcl_io_destroy did not run; undo the context manually */
		mcap_mcl_unref(con->mcl);
		g_free(con);
		return FALSE;
	}

	return TRUE;
}
/*
 * Handler for incoming data-channel (MDL) connections.
 *
 * The channel is accepted only when an MCL to the connecting device
 * exists, is in MCL_PENDING state and has an MDL waiting for its data
 * channel; any other incoming connection is dropped.
 */
static void connect_dc_event_cb(GIOChannel *chan, GError *gerr,
						gpointer user_data)
{
	struct mcap_instance *mi = user_data;
	struct mcap_mcl *mcl;
	struct mcap_mdl *mdl;
	GError *err = NULL;
	bdaddr_t dst;
	GSList *l;

	if (gerr)
		return;

	bt_io_get(chan, &err, BT_IO_OPT_DEST_BDADDR, &dst, BT_IO_OPT_INVALID);
	if (err) {
		error("%s", err->message);
		g_error_free(err);
		goto drop;
	}

	/* Only accept a data channel when an MDL was negotiated for it */
	mcl = find_mcl(mi->mcls, &dst);
	if (!mcl || mcl->state != MCL_PENDING)
		goto drop;

	for (l = mcl->mdls; l; l = l->next) {
		mdl = l->data;
		if (mdl->state == MDL_WAITING) {
			set_mdl_properties(chan, mdl);
			return;
		}
	}

drop:
	g_io_channel_shutdown(chan, TRUE, NULL);
}
/*
 * Configure an MCL for an accepted (incoming) control channel.
 *
 * Marks the MCL connected as acceptor, references the channel, arms the
 * control-channel watch, and either uncaches a previously known MCL
 * (reconnection) or inserts the new one into the active list.  Finishes
 * by notifying the user through the reconnected/connected callback.
 */
static void set_mcl_conf(GIOChannel *chan, struct mcap_mcl *mcl)
{
	gboolean reconn;

	mcl->state = MCL_CONNECTED;
	mcl->role = MCL_ACCEPTOR;
	mcl->req = MCL_AVAILABLE;
	mcl->cc = g_io_channel_ref(chan);
	mcl->ctrl |= MCAP_CTRL_STD_OP;

	mcap_sync_init(mcl);

	/* A cached MCL means the peer is reconnecting */
	reconn = (mcl->ctrl & MCAP_CTRL_CACHED);
	if (reconn)
		mcap_uncache_mcl(mcl);
	else
		mcl->mi->mcls = g_slist_prepend(mcl->mi->mcls,
							mcap_mcl_ref(mcl));

	/* The watch holds its own MCL reference, released by the
	 * GDestroyNotify when the watch is removed. */
	mcl->wid = g_io_add_watch_full(mcl->cc, G_PRIORITY_DEFAULT,
				G_IO_IN | G_IO_ERR | G_IO_HUP | G_IO_NVAL,
				(GIOFunc) mcl_control_cb,
				mcap_mcl_ref(mcl),
				(GDestroyNotify) mcap_mcl_unref);

	/* Callback to report new MCL */
	if (reconn)
		mcl->mi->mcl_reconnected_cb(mcl, mcl->mi->user_data);
	else
		mcl->mi->mcl_connected_cb(mcl, mcl->mi->user_data);
}
/*
 * Handler for incoming control-channel connections.
 *
 * Rejects a second control channel from a device that already has a
 * connected MCL.  Otherwise reuses a cached MCL (reconnection) or
 * allocates a new one, then hands over to set_mcl_conf() to finish
 * setup and notify the user.
 */
static void connect_mcl_event_cb(GIOChannel *chan, GError *gerr,
						gpointer user_data)
{
	struct mcap_instance *mi = user_data;
	struct mcap_mcl *mcl;
	bdaddr_t dst;
	char address[18], srcstr[18];
	GError *err = NULL;

	if (gerr)
		return;

	bt_io_get(chan, &err,
			BT_IO_OPT_DEST_BDADDR, &dst,
			BT_IO_OPT_DEST, address,
			BT_IO_OPT_INVALID);
	if (err) {
		error("%s", err->message);
		g_error_free(err);
		goto drop;
	}

	ba2str(&mi->src, srcstr);
	mcl = find_mcl(mi->mcls, &dst);
	if (mcl) {
		error("Control channel already created with %s on adapter %s",
				address, srcstr);
		goto drop;
	}

	mcl = find_mcl(mi->cached, &dst);
	if (!mcl) {
		mcl = g_new0(struct mcap_mcl, 1);
		mcl->mi = mcap_instance_ref(mi);
		bacpy(&mcl->addr, &dst);
		set_default_cb(mcl);
		/* Start MDL id allocation at a random point of the range */
		mcl->next_mdl = (rand() % MCAP_MDLID_FINAL) + 1;
	}

	set_mcl_conf(chan, mcl);

	return;

drop:
	g_io_channel_shutdown(chan, TRUE, NULL);
}
  1711. struct mcap_instance *mcap_create_instance(const bdaddr_t *src,
  1712. BtIOSecLevel sec,
  1713. uint16_t ccpsm,
  1714. uint16_t dcpsm,
  1715. mcap_mcl_event_cb mcl_connected,
  1716. mcap_mcl_event_cb mcl_reconnected,
  1717. mcap_mcl_event_cb mcl_disconnected,
  1718. mcap_mcl_event_cb mcl_uncached,
  1719. mcap_info_ind_event_cb mcl_sync_info_ind,
  1720. gpointer user_data,
  1721. GError **gerr)
  1722. {
  1723. struct mcap_instance *mi;
  1724. if (sec < BT_IO_SEC_MEDIUM) {
  1725. g_set_error(gerr, MCAP_ERROR, MCAP_ERROR_INVALID_ARGS,
  1726. "Security level can't be minor of %d",
  1727. BT_IO_SEC_MEDIUM);
  1728. return NULL;
  1729. }
  1730. if (!(mcl_connected && mcl_reconnected &&
  1731. mcl_disconnected && mcl_uncached)) {
  1732. g_set_error(gerr, MCAP_ERROR, MCAP_ERROR_INVALID_ARGS,
  1733. "The callbacks can't be null");
  1734. return NULL;
  1735. }
  1736. mi = g_new0(struct mcap_instance, 1);
  1737. bacpy(&mi->src, src);
  1738. mi->sec = sec;
  1739. mi->mcl_connected_cb = mcl_connected;
  1740. mi->mcl_reconnected_cb = mcl_reconnected;
  1741. mi->mcl_disconnected_cb = mcl_disconnected;
  1742. mi->mcl_uncached_cb = mcl_uncached;
  1743. mi->mcl_sync_infoind_cb = mcl_sync_info_ind;
  1744. mi->user_data = user_data;
  1745. mi->csp_enabled = FALSE;
  1746. /* Listen incoming connections in control channel */
  1747. mi->ccio = bt_io_listen(connect_mcl_event_cb, NULL, mi,
  1748. NULL, gerr,
  1749. BT_IO_OPT_SOURCE_BDADDR, &mi->src,
  1750. BT_IO_OPT_PSM, ccpsm,
  1751. BT_IO_OPT_MTU, MCAP_CC_MTU,
  1752. BT_IO_OPT_SEC_LEVEL, sec,
  1753. BT_IO_OPT_MODE, BT_IO_MODE_ERTM,
  1754. BT_IO_OPT_INVALID);
  1755. if (!mi->ccio) {
  1756. error("%s", (*gerr)->message);
  1757. g_free(mi);
  1758. return NULL;
  1759. }
  1760. /* Listen incoming connections in data channels */
  1761. mi->dcio = bt_io_listen(connect_dc_event_cb, NULL, mi,
  1762. NULL, gerr,
  1763. BT_IO_OPT_SOURCE_BDADDR, &mi->src,
  1764. BT_IO_OPT_PSM, dcpsm,
  1765. BT_IO_OPT_MTU, MCAP_DC_MTU,
  1766. BT_IO_OPT_SEC_LEVEL, sec,
  1767. BT_IO_OPT_INVALID);
  1768. if (!mi->dcio) {
  1769. g_io_channel_shutdown(mi->ccio, TRUE, NULL);
  1770. g_io_channel_unref(mi->ccio);
  1771. mi->ccio = NULL;
  1772. error("%s", (*gerr)->message);
  1773. g_free(mi);
  1774. return NULL;
  1775. }
  1776. /* Initialize random seed to generate mdlids for this instance */
  1777. srand(time(NULL));
  1778. return mcap_instance_ref(mi);
  1779. }
  1780. void mcap_release_instance(struct mcap_instance *mi)
  1781. {
  1782. GSList *l;
  1783. if (!mi)
  1784. return;
  1785. if (mi->ccio) {
  1786. g_io_channel_shutdown(mi->ccio, TRUE, NULL);
  1787. g_io_channel_unref(mi->ccio);
  1788. mi->ccio = NULL;
  1789. }
  1790. if (mi->dcio) {
  1791. g_io_channel_shutdown(mi->dcio, TRUE, NULL);
  1792. g_io_channel_unref(mi->dcio);
  1793. mi->dcio = NULL;
  1794. }
  1795. for (l = mi->mcls; l; l = l->next) {
  1796. mcap_mcl_release(l->data);
  1797. mcap_mcl_unref(l->data);
  1798. }
  1799. g_slist_free(mi->mcls);
  1800. mi->mcls = NULL;
  1801. for (l = mi->cached; l; l = l->next) {
  1802. mcap_mcl_release(l->data);
  1803. mcap_mcl_unref(l->data);
  1804. }
  1805. g_slist_free(mi->cached);
  1806. mi->cached = NULL;
  1807. }
  1808. struct mcap_instance *mcap_instance_ref(struct mcap_instance *mi)
  1809. {
  1810. mi->ref++;
  1811. DBG("mcap_instance_ref(%p): ref=%d", mi, mi->ref);
  1812. return mi;
  1813. }
  1814. void mcap_instance_unref(struct mcap_instance *mi)
  1815. {
  1816. mi->ref--;
  1817. DBG("mcap_instance_unref(%p): ref=%d", mi, mi->ref);
  1818. if (mi->ref > 0)
  1819. return;
  1820. mcap_release_instance(mi);
  1821. g_free(mi);
  1822. }
  1823. uint16_t mcap_get_ctrl_psm(struct mcap_instance *mi, GError **err)
  1824. {
  1825. uint16_t lpsm;
  1826. if (!(mi && mi->ccio)) {
  1827. g_set_error(err, MCAP_ERROR, MCAP_ERROR_INVALID_ARGS,
  1828. "Invalid MCAP instance");
  1829. return 0;
  1830. }
  1831. if (!bt_io_get(mi->ccio, err, BT_IO_OPT_PSM, &lpsm, BT_IO_OPT_INVALID))
  1832. return 0;
  1833. return lpsm;
  1834. }
  1835. uint16_t mcap_get_data_psm(struct mcap_instance *mi, GError **err)
  1836. {
  1837. uint16_t lpsm;
  1838. if (!(mi && mi->dcio)) {
  1839. g_set_error(err, MCAP_ERROR, MCAP_ERROR_INVALID_ARGS,
  1840. "Invalid MCAP instance");
  1841. return 0;
  1842. }
  1843. if (!bt_io_get(mi->dcio, err, BT_IO_OPT_PSM, &lpsm, BT_IO_OPT_INVALID))
  1844. return 0;
  1845. return lpsm;
  1846. }
  1847. gboolean mcap_set_data_chan_mode(struct mcap_instance *mi, uint8_t mode,
  1848. GError **err)
  1849. {
  1850. if (!(mi && mi->dcio)) {
  1851. g_set_error(err, MCAP_ERROR, MCAP_ERROR_INVALID_ARGS,
  1852. "Invalid MCAP instance");
  1853. return FALSE;
  1854. }
  1855. return bt_io_set(mi->dcio, err, BT_IO_OPT_MODE, mode,
  1856. BT_IO_OPT_INVALID);
  1857. }
  1858. struct mcap_mdl *mcap_mdl_ref(struct mcap_mdl *mdl)
  1859. {
  1860. mdl->ref++;
  1861. DBG("mcap_mdl_ref(%p): ref=%d", mdl, mdl->ref);
  1862. return mdl;
  1863. }
  1864. void mcap_mdl_unref(struct mcap_mdl *mdl)
  1865. {
  1866. mdl->ref--;
  1867. DBG("mcap_mdl_unref(%p): ref=%d", mdl, mdl->ref);
  1868. if (mdl->ref > 0)
  1869. return;
  1870. free_mdl(mdl);
  1871. }
  1872. static int send_sync_cmd(struct mcap_mcl *mcl, const void *buf, uint32_t size)
  1873. {
  1874. int sock;
  1875. if (mcl->cc == NULL)
  1876. return -1;
  1877. sock = g_io_channel_unix_get_fd(mcl->cc);
  1878. return mcap_send_data(sock, buf, size);
  1879. }
  1880. static int send_unsupported_cap_req(struct mcap_mcl *mcl)
  1881. {
  1882. mcap_md_sync_cap_rsp *cmd;
  1883. int sent;
  1884. cmd = g_new0(mcap_md_sync_cap_rsp, 1);
  1885. cmd->op = MCAP_MD_SYNC_CAP_RSP;
  1886. cmd->rc = MCAP_REQUEST_NOT_SUPPORTED;
  1887. sent = send_sync_cmd(mcl, cmd, sizeof(*cmd));
  1888. g_free(cmd);
  1889. return sent;
  1890. }
  1891. static int send_unsupported_set_req(struct mcap_mcl *mcl)
  1892. {
  1893. mcap_md_sync_set_rsp *cmd;
  1894. int sent;
  1895. cmd = g_new0(mcap_md_sync_set_rsp, 1);
  1896. cmd->op = MCAP_MD_SYNC_SET_RSP;
  1897. cmd->rc = MCAP_REQUEST_NOT_SUPPORTED;
  1898. sent = send_sync_cmd(mcl, cmd, sizeof(*cmd));
  1899. g_free(cmd);
  1900. return sent;
  1901. }
  1902. static void reset_tmstamp(struct mcap_csp *csp, struct timespec *base_time,
  1903. uint64_t new_tmstamp)
  1904. {
  1905. csp->base_tmstamp = new_tmstamp;
  1906. if (base_time)
  1907. csp->base_time = *base_time;
  1908. else
  1909. clock_gettime(CLK, &csp->base_time);
  1910. }
  1911. void mcap_sync_init(struct mcap_mcl *mcl)
  1912. {
  1913. if (!mcl->mi->csp_enabled) {
  1914. mcl->csp = NULL;
  1915. return;
  1916. }
  1917. mcl->csp = g_new0(struct mcap_csp, 1);
  1918. mcl->csp->rem_req_acc = 10000; /* safe divisor */
  1919. mcl->csp->set_data = NULL;
  1920. mcl->csp->csp_priv_data = NULL;
  1921. reset_tmstamp(mcl->csp, NULL, 0);
  1922. }
  1923. void mcap_sync_stop(struct mcap_mcl *mcl)
  1924. {
  1925. if (!mcl->csp)
  1926. return;
  1927. if (mcl->csp->ind_timer)
  1928. g_source_remove(mcl->csp->ind_timer);
  1929. if (mcl->csp->set_timer)
  1930. g_source_remove(mcl->csp->set_timer);
  1931. if (mcl->csp->set_data)
  1932. g_free(mcl->csp->set_data);
  1933. if (mcl->csp->csp_priv_data)
  1934. g_free(mcl->csp->csp_priv_data);
  1935. mcl->csp->ind_timer = 0;
  1936. mcl->csp->set_timer = 0;
  1937. mcl->csp->set_data = NULL;
  1938. mcl->csp->csp_priv_data = NULL;
  1939. g_free(mcl->csp);
  1940. mcl->csp = NULL;
  1941. }
  1942. static uint64_t time_us(struct timespec *tv)
  1943. {
  1944. return tv->tv_sec * 1000000ll + tv->tv_nsec / 1000ll;
  1945. }
  1946. static int64_t bt2us(int bt)
  1947. {
  1948. return bt * 312.5;
  1949. }
  1950. static int bt2ms(int bt)
  1951. {
  1952. return bt * 312.5 / 1000;
  1953. }
  1954. static int btoffset(uint32_t btclk1, uint32_t btclk2)
  1955. {
  1956. int offset = btclk2 - btclk1;
  1957. if (offset <= -MCAP_BTCLOCK_HALF)
  1958. offset += MCAP_BTCLOCK_FIELD;
  1959. else if (offset > MCAP_BTCLOCK_HALF)
  1960. offset -= MCAP_BTCLOCK_FIELD;
  1961. return offset;
  1962. }
/* Wrap-corrected signed difference between two BT clock readings
 * (positive when btclk2 is ahead of btclk1); alias of btoffset() */
static int btdiff(uint32_t btclk1, uint32_t btclk2)
{
	return btoffset(btclk1, btclk2);
}
/* Check a received BT clock value against the maximum valid value */
static gboolean valid_btclock(uint32_t btclk)
{
	return btclk <= MCAP_BTCLOCK_MAX;
}
/* This call may fail; either deal with retry or use read_btclock_retry */
static gboolean read_btclock(struct mcap_mcl *mcl, uint32_t *btclock,
						uint16_t *btaccuracy)
{
	/*
	 * FIXME: btd_adapter_read_clock(...) always return FALSE, current
	 * code doesn't support CSP (Clock Synchronization Protocol). To avoid
	 * build dependancy on struct 'btd_adapter', removing this code.
	 */

	/* Stubbed out: BT clock reads always fail, so every CSP operation
	 * that needs the clock takes its failure path (see callers). */
	return FALSE;
}
  1982. static gboolean read_btclock_retry(struct mcap_mcl *mcl, uint32_t *btclock,
  1983. uint16_t *btaccuracy)
  1984. {
  1985. int retries = 5;
  1986. while (--retries >= 0) {
  1987. if (read_btclock(mcl, btclock, btaccuracy))
  1988. return TRUE;
  1989. DBG("CSP: retrying to read bt clock...");
  1990. }
  1991. return FALSE;
  1992. }
  1993. static gboolean get_btrole(struct mcap_mcl *mcl)
  1994. {
  1995. int sock, flags;
  1996. socklen_t len;
  1997. if (mcl->cc == NULL)
  1998. return -1;
  1999. sock = g_io_channel_unix_get_fd(mcl->cc);
  2000. len = sizeof(flags);
  2001. if (getsockopt(sock, SOL_L2CAP, L2CAP_LM, &flags, &len))
  2002. DBG("CSP: could not read role");
  2003. return flags & L2CAP_LM_MASTER;
  2004. }
  2005. uint64_t mcap_get_timestamp(struct mcap_mcl *mcl,
  2006. struct timespec *given_time)
  2007. {
  2008. struct timespec now;
  2009. uint64_t tmstamp;
  2010. if (!mcl->csp)
  2011. return MCAP_TMSTAMP_DONTSET;
  2012. if (given_time)
  2013. now = *given_time;
  2014. else
  2015. if (clock_gettime(CLK, &now) < 0)
  2016. return MCAP_TMSTAMP_DONTSET;
  2017. tmstamp = time_us(&now) - time_us(&mcl->csp->base_time)
  2018. + mcl->csp->base_tmstamp;
  2019. return tmstamp;
  2020. }
  2021. uint32_t mcap_get_btclock(struct mcap_mcl *mcl)
  2022. {
  2023. uint32_t btclock;
  2024. uint16_t accuracy;
  2025. if (!mcl->csp)
  2026. return MCAP_BTCLOCK_IMMEDIATE;
  2027. if (!read_btclock_retry(mcl, &btclock, &accuracy))
  2028. btclock = 0xffffffff;
  2029. return btclock;
  2030. }
/*
 * Measure local CSP capabilities once and cache them in the global
 * _caps: timestamp resolution (from clock_getres), an estimated
 * accuracy, and the latency of reading the BT clock, sampled
 * SAMPLE_COUNT times with outliers ("freak" readings more than six
 * average deviations above the mean) replaced by the mean.  Derived
 * values preempt_thresh and syncleadtime_ms feed the rest of the CSP
 * code.
 *
 * Returns FALSE when the BT clock could not be read reliably; the
 * measurement may be retried later (csp_caps_initialized stays FALSE).
 */
static gboolean initialize_caps(struct mcap_mcl *mcl)
{
	struct timespec t1, t2;
	int latencies[SAMPLE_COUNT];
	int latency, avg, dev;
	uint32_t btclock;
	uint16_t btaccuracy;
	int i;
	int retries;

	clock_getres(CLK, &t1);

	_caps.ts_res = time_us(&t1);
	if (_caps.ts_res < 1)
		_caps.ts_res = 1;
	_caps.ts_acc = 20; /* ppm, estimated */

	/* A little exercise before measuring latency */
	clock_gettime(CLK, &t1);
	read_btclock_retry(mcl, &btclock, &btaccuracy);

	/* Read clock a number of times and measure latency */
	avg = 0;
	i = 0;
	retries = MAX_RETRIES;
	while (i < SAMPLE_COUNT && retries > 0) {
		clock_gettime(CLK, &t1);
		if (!read_btclock(mcl, &btclock, &btaccuracy)) {
			retries--;
			continue;
		}
		clock_gettime(CLK, &t2);

		latency = time_us(&t2) - time_us(&t1);
		latencies[i] = latency;
		avg += latency;
		i++;
	}

	if (retries <= 0)
		return FALSE;

	/* Calculate average and deviation */
	avg /= SAMPLE_COUNT;
	dev = 0;
	for (i = 0; i < SAMPLE_COUNT; ++i)
		dev += abs(latencies[i] - avg);
	dev /= SAMPLE_COUNT;

	/* Calculate corrected average, without 'freak' latencies */
	latency = 0;
	for (i = 0; i < SAMPLE_COUNT; ++i) {
		if (latencies[i] > (avg + dev * 6))
			latency += avg;
		else
			latency += latencies[i];
	}
	latency /= SAMPLE_COUNT;

	_caps.latency = latency;
	_caps.preempt_thresh = latency * 4;
	_caps.syncleadtime_ms = latency * 50 / 1000;

	csp_caps_initialized = TRUE;
	return TRUE;
}
  2087. static struct csp_caps *caps(struct mcap_mcl *mcl)
  2088. {
  2089. if (!csp_caps_initialized)
  2090. if (!initialize_caps(mcl)) {
  2091. /* Temporary failure in reading BT clock */
  2092. return NULL;
  2093. }
  2094. return &_caps;
  2095. }
  2096. static int send_sync_cap_rsp(struct mcap_mcl *mcl, uint8_t rspcode,
  2097. uint8_t btclockres, uint16_t synclead,
  2098. uint16_t tmstampres, uint16_t tmstampacc)
  2099. {
  2100. mcap_md_sync_cap_rsp *rsp;
  2101. int sent;
  2102. rsp = g_new0(mcap_md_sync_cap_rsp, 1);
  2103. rsp->op = MCAP_MD_SYNC_CAP_RSP;
  2104. rsp->rc = rspcode;
  2105. rsp->btclock = btclockres;
  2106. rsp->sltime = htons(synclead);
  2107. rsp->timestnr = htons(tmstampres);
  2108. rsp->timestna = htons(tmstampacc);
  2109. sent = send_sync_cmd(mcl, rsp, sizeof(*rsp));
  2110. g_free(rsp);
  2111. return sent;
  2112. }
/*
 * Handle MD_SYNC_CAP_REQ from the peer.
 *
 * Validates the requested timestamp accuracy against our measured
 * capabilities and replies either with our capability parameters or
 * with an error response.  On success it records that the remote asked
 * for our caps (a precondition for a later MD_SYNC_SET_REQ) and the
 * accuracy it requires, which later determines indication frequency.
 */
static void proc_sync_cap_req(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
{
	mcap_md_sync_cap_req *req;
	uint16_t required_accuracy;
	uint16_t our_accuracy;
	uint32_t btclock;
	uint16_t btres;

	if (len != sizeof(mcap_md_sync_cap_req)) {
		send_sync_cap_rsp(mcl, MCAP_INVALID_PARAM_VALUE,
					0, 0, 0, 0);
		return;
	}

	/* caps() may fail transiently while the BT clock is unreadable */
	if (!caps(mcl)) {
		send_sync_cap_rsp(mcl, MCAP_RESOURCE_UNAVAILABLE,
					0, 0, 0, 0);
		return;
	}

	req = (mcap_md_sync_cap_req *) cmd;
	required_accuracy = ntohs(req->timest);
	our_accuracy = caps(mcl)->ts_acc;

	btres = 0;

	/* We cannot offer better accuracy than we measured locally */
	if (required_accuracy < our_accuracy || required_accuracy < 1) {
		send_sync_cap_rsp(mcl, MCAP_RESOURCE_UNAVAILABLE,
					0, 0, 0, 0);
		return;
	}

	if (!read_btclock_retry(mcl, &btclock, &btres)) {
		send_sync_cap_rsp(mcl, MCAP_RESOURCE_UNAVAILABLE,
					0, 0, 0, 0);
		return;
	}

	mcl->csp->remote_caps = 1;
	mcl->csp->rem_req_acc = required_accuracy;

	send_sync_cap_rsp(mcl, MCAP_SUCCESS, btres,
			caps(mcl)->syncleadtime_ms,
			caps(mcl)->ts_res, our_accuracy);
}
  2150. static int send_sync_set_rsp(struct mcap_mcl *mcl, uint8_t rspcode,
  2151. uint32_t btclock, uint64_t timestamp,
  2152. uint16_t tmstampres)
  2153. {
  2154. mcap_md_sync_set_rsp *rsp;
  2155. int sent;
  2156. rsp = g_new0(mcap_md_sync_set_rsp, 1);
  2157. rsp->op = MCAP_MD_SYNC_SET_RSP;
  2158. rsp->rc = rspcode;
  2159. rsp->btclock = htonl(btclock);
  2160. rsp->timestst = hton64(timestamp);
  2161. rsp->timestsa = htons(tmstampres);
  2162. sent = send_sync_cmd(mcl, rsp, sizeof(*rsp));
  2163. g_free(rsp);
  2164. return sent;
  2165. }
/*
 * Take a coherent (BT clock, system time, CSP timestamp) triple.
 *
 * Reads the system clock immediately before and after the BT clock
 * read, and retries when the whole transaction took longer than
 * preempt_thresh — that suggests preemption between the reads, so the
 * values would not describe the same instant.  At most 5 attempts.
 */
static gboolean get_all_clocks(struct mcap_mcl *mcl, uint32_t *btclock,
						struct timespec *base_time,
						uint64_t *timestamp)
{
	int latency;
	int retry = 5;
	uint16_t btres;
	struct timespec t0;

	if (!caps(mcl))
		return FALSE;

	latency = caps(mcl)->preempt_thresh + 1;

	while (latency > caps(mcl)->preempt_thresh && --retry >= 0) {
		if (clock_gettime(CLK, &t0) < 0)
			return FALSE;

		if (!read_btclock(mcl, btclock, &btres))
			continue;

		if (clock_gettime(CLK, base_time) < 0)
			return FALSE;

		/*
		 * Tries to detect preemption between clock_gettime
		 * and read_btclock by measuring transaction time
		 */
		latency = time_us(base_time) - time_us(&t0);
	}

	if (retry < 0)
		return FALSE;

	*timestamp = mcap_get_timestamp(mcl, base_time);

	return TRUE;
}
/*
 * Timer callback that sends an MD_SYNC_INFO_IND carrying the current
 * BT clock, timestamp and our latency as accuracy.
 *
 * Returns !sent, i.e. keeps the periodic timer alive while
 * send_sync_cmd() returns 0 (NOTE(review): presumably 0 means success —
 * confirm against mcap_send_data()); a clock-read failure stops it.
 */
static gboolean sync_send_indication(gpointer user_data)
{
	struct mcap_mcl *mcl;
	mcap_md_sync_info_ind *cmd;
	uint32_t btclock;
	uint64_t tmstamp;
	struct timespec base_time;
	int sent;

	if (!user_data)
		return FALSE;

	btclock = 0;
	mcl = user_data;

	if (!caps(mcl))
		return FALSE;

	if (!get_all_clocks(mcl, &btclock, &base_time, &tmstamp))
		return FALSE;

	cmd = g_new0(mcap_md_sync_info_ind, 1);
	cmd->op = MCAP_MD_SYNC_INFO_IND;
	cmd->btclock = htonl(btclock);
	cmd->timestst = hton64(tmstamp);
	cmd->timestsa = htons(caps(mcl)->latency);

	sent = send_sync_cmd(mcl, cmd, sizeof(*cmd));
	g_free(cmd);

	return !sent;
}
/*
 * Deferred phase of MD_SYNC_SET_REQ processing, possibly scheduled via
 * a timer when the request named a future BT clock instant.
 *
 * Re-reads all clocks, optionally rebases the local timestamp origin,
 * (re)arms the periodic info-indication timer and sends the set
 * response.  Always returns FALSE so a scheduling timeout fires once.
 */
static gboolean proc_sync_set_req_phase2(gpointer user_data)
{
	struct mcap_mcl *mcl;
	struct sync_set_data *data;
	uint8_t update;
	uint32_t sched_btclock;
	uint64_t new_tmstamp;
	int ind_freq;
	int role;
	uint32_t btclock;
	uint64_t tmstamp;
	struct timespec base_time;
	uint16_t tmstampacc;
	gboolean reset;
	int delay;

	if (!user_data)
		return FALSE;

	mcl = user_data;

	if (!mcl->csp->set_data)
		return FALSE;

	btclock = 0;
	data = mcl->csp->set_data;
	update = data->update;
	sched_btclock = data->sched_btclock;
	new_tmstamp = data->timestamp;
	ind_freq = data->ind_freq;
	role = data->role;

	if (!caps(mcl)) {
		send_sync_set_rsp(mcl, MCAP_UNSPECIFIED_ERROR, 0, 0, 0);
		return FALSE;
	}

	if (!get_all_clocks(mcl, &btclock, &base_time, &tmstamp)) {
		send_sync_set_rsp(mcl, MCAP_UNSPECIFIED_ERROR, 0, 0, 0);
		return FALSE;
	}

	/* The link role must not have changed since the request arrived */
	if (get_btrole(mcl) != role) {
		send_sync_set_rsp(mcl, MCAP_INVALID_OPERATION, 0, 0, 0);
		return FALSE;
	}

	reset = (new_tmstamp != MCAP_TMSTAMP_DONTSET);
	if (reset) {
		if (sched_btclock != MCAP_BTCLOCK_IMMEDIATE) {
			/* Compensate the new timestamp by the BT-clock
			 * distance actually elapsed since the scheduled
			 * instant.  NOTE(review): new_tmstamp is unsigned,
			 * so (new_tmstamp - delay) > 0 is nearly always
			 * true; looks suspicious but kept as-is. */
			delay = bt2us(btdiff(sched_btclock, btclock));
			if (delay >= 0 || ((new_tmstamp - delay) > 0)) {
				new_tmstamp += delay;
				DBG("CSP: reset w/ delay %dus, compensated",
					delay);
			} else
				DBG("CSP: reset w/ delay %dus, uncompensated",
					delay);
		}

		reset_tmstamp(mcl->csp, &base_time, new_tmstamp);
		tmstamp = new_tmstamp;
	}

	tmstampacc = caps(mcl)->latency + caps(mcl)->ts_acc;

	if (mcl->csp->ind_timer) {
		g_source_remove(mcl->csp->ind_timer);
		mcl->csp->ind_timer = 0;
	}

	if (update) {
		int when = ind_freq + caps(mcl)->syncleadtime_ms;
		mcl->csp->ind_timer = g_timeout_add(when,
						sync_send_indication,
						mcl);
	}

	send_sync_set_rsp(mcl, MCAP_SUCCESS, btclock, tmstamp, tmstampacc);

	/* First indication after set is immediate */
	if (update)
		sync_send_indication(mcl);

	return FALSE;
}
/*
 * Handle MD_SYNC_SET_REQ from the peer (acceptor role).
 *
 * Validates parameter ranges, requires a prior capability exchange and
 * a readable BT clock, and checks that a scheduled BT clock instant is
 * neither in the past, more than ~60s away, nor too close to be met.
 * The request parameters are stashed in csp->set_data and phase 2
 * either runs at once or is scheduled shortly before the requested
 * instant.
 */
static void proc_sync_set_req(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
{
	mcap_md_sync_set_req *req;
	uint32_t sched_btclock, cur_btclock;
	uint16_t btres;
	uint8_t update;
	uint64_t timestamp;
	struct sync_set_data *set_data;
	int phase2_delay, ind_freq, when;

	if (len != sizeof(mcap_md_sync_set_req)) {
		send_sync_set_rsp(mcl, MCAP_INVALID_PARAM_VALUE, 0, 0, 0);
		return;
	}

	req = (mcap_md_sync_set_req *) cmd;
	sched_btclock = ntohl(req->btclock);
	update = req->timestui;
	timestamp = ntoh64(req->timestst);
	cur_btclock = 0;

	if (sched_btclock != MCAP_BTCLOCK_IMMEDIATE &&
			!valid_btclock(sched_btclock)) {
		send_sync_set_rsp(mcl, MCAP_INVALID_PARAM_VALUE, 0, 0, 0);
		return;
	}

	/* update is a boolean flag on the wire */
	if (update > 1) {
		send_sync_set_rsp(mcl, MCAP_INVALID_PARAM_VALUE, 0, 0, 0);
		return;
	}

	if (!mcl->csp->remote_caps) {
		/* Remote side did not ask our capabilities yet */
		send_sync_set_rsp(mcl, MCAP_INVALID_PARAM_VALUE, 0, 0, 0);
		return;
	}

	if (!caps(mcl)) {
		send_sync_set_rsp(mcl, MCAP_UNSPECIFIED_ERROR, 0, 0, 0);
		return;
	}

	if (!read_btclock_retry(mcl, &cur_btclock, &btres)) {
		send_sync_set_rsp(mcl, MCAP_UNSPECIFIED_ERROR, 0, 0, 0);
		return;
	}

	if (sched_btclock == MCAP_BTCLOCK_IMMEDIATE)
		phase2_delay = 0;
	else {
		phase2_delay = btdiff(cur_btclock, sched_btclock);

		if (phase2_delay < 0) {
			/* can not reset in the past tense */
			send_sync_set_rsp(mcl, MCAP_INVALID_PARAM_VALUE,
						0, 0, 0);
			return;
		}

		/* Convert to miliseconds */
		phase2_delay = bt2ms(phase2_delay);

		if (phase2_delay > 61*1000) {
			/* More than 60 seconds in the future */
			send_sync_set_rsp(mcl, MCAP_INVALID_PARAM_VALUE,
						0, 0, 0);
			return;
		} else if (phase2_delay < caps(mcl)->latency / 1000) {
			/* Too fast for us to do in time */
			send_sync_set_rsp(mcl, MCAP_INVALID_PARAM_VALUE,
						0, 0, 0);
			return;
		}
	}

	if (update) {
		/*
		 * Indication frequency: required accuracy divided by ours
		 * Converted to milisseconds
		 */
		ind_freq = (1000 * mcl->csp->rem_req_acc) / caps(mcl)->ts_acc;

		if (ind_freq < MAX(caps(mcl)->latency * 2 / 1000, 100)) {
			/* Too frequent, we can't handle */
			send_sync_set_rsp(mcl, MCAP_INVALID_PARAM_VALUE,
						0, 0, 0);
			return;
		}

		DBG("CSP: indication every %dms", ind_freq);
	} else
		ind_freq = 0;

	if (mcl->csp->ind_timer) {
		/* Old indications are no longer sent */
		g_source_remove(mcl->csp->ind_timer);
		mcl->csp->ind_timer = 0;
	}

	if (!mcl->csp->set_data)
		mcl->csp->set_data = g_new0(struct sync_set_data, 1);

	set_data = (struct sync_set_data *) mcl->csp->set_data;
	set_data->update = update;
	set_data->sched_btclock = sched_btclock;
	set_data->timestamp = timestamp;
	set_data->ind_freq = ind_freq;
	set_data->role = get_btrole(mcl);

	/*
	 * TODO is there some way to schedule a call based directly on
	 * a BT clock value, instead of this estimation that uses
	 * the SO clock?
	 */
	if (phase2_delay > 0) {
		when = phase2_delay + caps(mcl)->syncleadtime_ms;
		mcl->csp->set_timer = g_timeout_add(when,
						proc_sync_set_req_phase2,
						mcl);
	} else
		proc_sync_set_req_phase2(mcl);

	/* First indication is immediate.
	 * NOTE(review): phase 2 also sends an immediate indication when
	 * update is set, so the immediate path appears to indicate twice —
	 * confirm whether that duplication is intentional. */
	if (update)
		sync_send_indication(mcl);
}
  2399. static void proc_sync_cap_rsp(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
  2400. {
  2401. mcap_md_sync_cap_rsp *rsp;
  2402. uint8_t mcap_err;
  2403. uint8_t btclockres;
  2404. uint16_t synclead;
  2405. uint16_t tmstampres;
  2406. uint16_t tmstampacc;
  2407. struct mcap_sync_cap_cbdata *cbdata;
  2408. mcap_sync_cap_cb cb;
  2409. gpointer user_data;
  2410. if (mcl->csp->csp_req != MCAP_MD_SYNC_CAP_REQ) {
  2411. DBG("CSP: got unexpected cap respose");
  2412. return;
  2413. }
  2414. if (!mcl->csp->csp_priv_data) {
  2415. DBG("CSP: no priv data for cap respose");
  2416. return;
  2417. }
  2418. cbdata = mcl->csp->csp_priv_data;
  2419. cb = cbdata->cb;
  2420. user_data = cbdata->user_data;
  2421. g_free(cbdata);
  2422. mcl->csp->csp_priv_data = NULL;
  2423. mcl->csp->csp_req = 0;
  2424. if (len != sizeof(mcap_md_sync_cap_rsp)) {
  2425. DBG("CSP: got corrupted cap respose");
  2426. return;
  2427. }
  2428. rsp = (mcap_md_sync_cap_rsp *) cmd;
  2429. mcap_err = rsp->rc;
  2430. btclockres = rsp->btclock;
  2431. synclead = ntohs(rsp->sltime);
  2432. tmstampres = ntohs(rsp->timestnr);
  2433. tmstampacc = ntohs(rsp->timestna);
  2434. if (!mcap_err)
  2435. mcl->csp->local_caps = TRUE;
  2436. cb(mcl, mcap_err, btclockres, synclead, tmstampres, tmstampacc, NULL,
  2437. user_data);
  2438. }
  2439. static void proc_sync_set_rsp(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
  2440. {
  2441. mcap_md_sync_set_rsp *rsp;
  2442. uint8_t mcap_err;
  2443. uint32_t btclock;
  2444. uint64_t timestamp;
  2445. uint16_t accuracy;
  2446. struct mcap_sync_set_cbdata *cbdata;
  2447. mcap_sync_set_cb cb;
  2448. gpointer user_data;
  2449. if (mcl->csp->csp_req != MCAP_MD_SYNC_SET_REQ) {
  2450. DBG("CSP: got unexpected set respose");
  2451. return;
  2452. }
  2453. if (!mcl->csp->csp_priv_data) {
  2454. DBG("CSP: no priv data for set respose");
  2455. return;
  2456. }
  2457. cbdata = mcl->csp->csp_priv_data;
  2458. cb = cbdata->cb;
  2459. user_data = cbdata->user_data;
  2460. g_free(cbdata);
  2461. mcl->csp->csp_priv_data = NULL;
  2462. mcl->csp->csp_req = 0;
  2463. if (len != sizeof(mcap_md_sync_set_rsp)) {
  2464. DBG("CSP: got corrupted set respose");
  2465. return;
  2466. }
  2467. rsp = (mcap_md_sync_set_rsp *) cmd;
  2468. mcap_err = rsp->rc;
  2469. btclock = ntohl(rsp->btclock);
  2470. timestamp = ntoh64(rsp->timestst);
  2471. accuracy = ntohs(rsp->timestsa);
  2472. if (!mcap_err && !valid_btclock(btclock))
  2473. mcap_err = MCAP_ERROR_INVALID_ARGS;
  2474. cb(mcl, mcap_err, btclock, timestamp, accuracy, NULL, user_data);
  2475. }
  2476. static void proc_sync_info_ind(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
  2477. {
  2478. mcap_md_sync_info_ind *req;
  2479. struct sync_info_ind_data data;
  2480. uint32_t btclock;
  2481. if (!mcl->csp->ind_expected) {
  2482. DBG("CSP: received unexpected info indication");
  2483. return;
  2484. }
  2485. if (len != sizeof(mcap_md_sync_info_ind))
  2486. return;
  2487. req = (mcap_md_sync_info_ind *) cmd;
  2488. btclock = ntohl(req->btclock);
  2489. if (!valid_btclock(btclock))
  2490. return;
  2491. data.btclock = btclock;
  2492. data.timestamp = ntoh64(req->timestst);
  2493. data.accuracy = ntohs(req->timestsa);
  2494. if (mcl->mi->mcl_sync_infoind_cb)
  2495. mcl->mi->mcl_sync_infoind_cb(mcl, &data);
  2496. }
  2497. void proc_sync_cmd(struct mcap_mcl *mcl, uint8_t *cmd, uint32_t len)
  2498. {
  2499. if (!mcl->mi->csp_enabled || !mcl->csp) {
  2500. switch (cmd[0]) {
  2501. case MCAP_MD_SYNC_CAP_REQ:
  2502. send_unsupported_cap_req(mcl);
  2503. break;
  2504. case MCAP_MD_SYNC_SET_REQ:
  2505. send_unsupported_set_req(mcl);
  2506. break;
  2507. }
  2508. return;
  2509. }
  2510. switch (cmd[0]) {
  2511. case MCAP_MD_SYNC_CAP_REQ:
  2512. proc_sync_cap_req(mcl, cmd, len);
  2513. break;
  2514. case MCAP_MD_SYNC_CAP_RSP:
  2515. proc_sync_cap_rsp(mcl, cmd, len);
  2516. break;
  2517. case MCAP_MD_SYNC_SET_REQ:
  2518. proc_sync_set_req(mcl, cmd, len);
  2519. break;
  2520. case MCAP_MD_SYNC_SET_RSP:
  2521. proc_sync_set_rsp(mcl, cmd, len);
  2522. break;
  2523. case MCAP_MD_SYNC_INFO_IND:
  2524. proc_sync_info_ind(mcl, cmd, len);
  2525. break;
  2526. }
  2527. }
  2528. void mcap_sync_cap_req(struct mcap_mcl *mcl, uint16_t reqacc,
  2529. mcap_sync_cap_cb cb, gpointer user_data,
  2530. GError **err)
  2531. {
  2532. struct mcap_sync_cap_cbdata *cbdata;
  2533. mcap_md_sync_cap_req *cmd;
  2534. if (!mcl->mi->csp_enabled || !mcl->csp) {
  2535. g_set_error(err,
  2536. MCAP_CSP_ERROR,
  2537. MCAP_ERROR_RESOURCE_UNAVAILABLE,
  2538. "CSP not enabled for the instance");
  2539. return;
  2540. }
  2541. if (mcl->csp->csp_req) {
  2542. g_set_error(err,
  2543. MCAP_CSP_ERROR,
  2544. MCAP_ERROR_RESOURCE_UNAVAILABLE,
  2545. "Pending CSP request");
  2546. return;
  2547. }
  2548. mcl->csp->csp_req = MCAP_MD_SYNC_CAP_REQ;
  2549. cmd = g_new0(mcap_md_sync_cap_req, 1);
  2550. cmd->op = MCAP_MD_SYNC_CAP_REQ;
  2551. cmd->timest = htons(reqacc);
  2552. cbdata = g_new0(struct mcap_sync_cap_cbdata, 1);
  2553. cbdata->cb = cb;
  2554. cbdata->user_data = user_data;
  2555. mcl->csp->csp_priv_data = cbdata;
  2556. send_sync_cmd(mcl, cmd, sizeof(*cmd));
  2557. g_free(cmd);
  2558. }
  2559. void mcap_sync_set_req(struct mcap_mcl *mcl, uint8_t update, uint32_t btclock,
  2560. uint64_t timestamp, mcap_sync_set_cb cb,
  2561. gpointer user_data, GError **err)
  2562. {
  2563. mcap_md_sync_set_req *cmd;
  2564. struct mcap_sync_set_cbdata *cbdata;
  2565. if (!mcl->mi->csp_enabled || !mcl->csp) {
  2566. g_set_error(err,
  2567. MCAP_CSP_ERROR,
  2568. MCAP_ERROR_RESOURCE_UNAVAILABLE,
  2569. "CSP not enabled for the instance");
  2570. return;
  2571. }
  2572. if (!mcl->csp->local_caps) {
  2573. g_set_error(err,
  2574. MCAP_CSP_ERROR,
  2575. MCAP_ERROR_RESOURCE_UNAVAILABLE,
  2576. "Did not get CSP caps from peripheral yet");
  2577. return;
  2578. }
  2579. if (mcl->csp->csp_req) {
  2580. g_set_error(err,
  2581. MCAP_CSP_ERROR,
  2582. MCAP_ERROR_RESOURCE_UNAVAILABLE,
  2583. "Pending CSP request");
  2584. return;
  2585. }
  2586. mcl->csp->csp_req = MCAP_MD_SYNC_SET_REQ;
  2587. cmd = g_new0(mcap_md_sync_set_req, 1);
  2588. cmd->op = MCAP_MD_SYNC_SET_REQ;
  2589. cmd->timestui = update;
  2590. cmd->btclock = htonl(btclock);
  2591. cmd->timestst = hton64(timestamp);
  2592. mcl->csp->ind_expected = update;
  2593. cbdata = g_new0(struct mcap_sync_set_cbdata, 1);
  2594. cbdata->cb = cb;
  2595. cbdata->user_data = user_data;
  2596. mcl->csp->csp_priv_data = cbdata;
  2597. send_sync_cmd(mcl, cmd, sizeof(*cmd));
  2598. g_free(cmd);
  2599. }
/* Enable CSP for this instance; mcap_sync_init() only allocates per-MCL
 * CSP state while this flag is set */
void mcap_enable_csp(struct mcap_instance *mi)
{
	mi->csp_enabled = TRUE;
}
/* Disable CSP for this instance; MCLs configured afterwards get no CSP
 * state and will reject CSP requests (see proc_sync_cmd) */
void mcap_disable_csp(struct mcap_instance *mi)
{
	mi->csp_enabled = FALSE;
}